Primo batch di moduli e configurazioni.

This commit is contained in:
Andrea Dell'Amico 2024-01-19 20:01:42 +01:00
parent 8e76a070a5
commit 73aa2f1bfb
Signed by: adellam
GPG Key ID: 147ABE6CEB9E20FF
50 changed files with 4242 additions and 2 deletions

View File

@ -1,3 +1,7 @@
# OpenStack infrastructure via Terraform
Codice Terraform per configurare i progetti OpenStack sulla infrastruttura cloud ISTI:
1. Laboratori.
2. Attività che richiedono progetti OpenStack dedicati, sia legati ai laboratori, sia legati a servizi o gruppi di lavoro dell'istituto.

View File

@ -0,0 +1,222 @@
# Define required providers.
# Pins Terraform >= 0.14 and the community OpenStack provider to the 1.53.x series.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Provider configuration: credentials and endpoints come from the "ISTI-Cloud"
# entry of the local clouds.yaml.
provider "openstack" {
# cloud = "kdd-lab"
cloud = "ISTI-Cloud"
}
# Shared values (regions, images, flavors, network CIDRs, ...) common to all
# lab projects; re-exported as outputs further below.
module "common_variables" {
source = "../../modules/labs_common_variables"
}
# Main module: creates the lab's private network, subnet, external router and
# the primary DNS zone for the kdd-lab OpenStack project.
module "main_private_net_and_dns_zone" {
  source = "../../modules/labs_private_net_and_dns_zone"
  dns_zone = {
    zone_name   = "kdd.cloud.isti.cnr.it."
    email       = "postmaster@isti.cnr.it"
    description = "DNS primary zone for the kdd-lab project"
    # NOTE(review): 8600 looks like a typo for 3600 (1 hour) — confirm before changing.
    ttl = 8600
  }
  os_project_data = {
    id = "1b45adf388934758b56d0dfdb4bfacf3"
  }
  main_private_network = {
    name        = "kdd-cloud-main"
    description = "KDD private network (use this as the main network)"
  }
  main_private_subnet = {
    name             = "kdd-cloud-main-subnet"
    description      = "kdd main private subnet"
    cidr             = "10.12.0.0/21"
    gateway_ip       = "10.12.0.1"
    allocation_start = "10.12.1.1"
    allocation_end   = "10.12.7.254"
  }
  external_router = {
    name        = "kdd-cloud-external-router"
    description = "KDD main router"
  }
  # Default firewall (security group) rules for the project.
  # BUG FIX: the rule sources referenced "module.labs_common_variables", but the
  # module above is declared as module "common_variables" (only its *source path*
  # is ../../modules/labs_common_variables), so those references could never
  # resolve. They now point at module.common_variables.
  default_firewall_rules_map = {
    "ssh_from_isti_net" = {
      description = "SSH from the ISTI network"
      source      = module.common_variables.ssh_sources.isti_net_cidr
      port_min    = 22
      port_max    = 22
    },
    "ssh_from_s2i2s_vpn_1" = {
      description = "SSH from the S2I2S VPN 1"
      source      = module.common_variables.ssh_sources.s2i2s_vpn_1_cidr
      port_min    = 22
      port_max    = 22
    },
    "ssh_from_s2i2s_vpn_2" = {
      description = "SSH from the S2I2S VPN 2"
      source      = module.common_variables.ssh_sources.s2i2s_vpn_2_cidr
      port_min    = 22
      port_max    = 22
    },
    "http_from_everywhere" = {
      description = "HTTP from everywhere"
      source      = "0.0.0.0/0"
      port_min    = 80
      port_max    = 80
    },
    "https_from_everywhere" = {
      description = "HTTPS from everywhere"
      source      = "0.0.0.0/0"
      port_min    = 443
      port_max    = 443
    }
  }
}
# Outputs produced by the main networking/DNS module.
output "dns_zone_id" {
value = module.main_private_net_and_dns_zone.dns_zone_id
}
output "main_private_network_id" {
value = module.main_private_net_and_dns_zone.main_private_network_id
}
output "main_subnet_network_id" {
value = module.main_private_net_and_dns_zone.main_subnet_network_id
}
output "external_gateway_ip" {
value = module.main_private_net_and_dns_zone.external_gateway_ip
}
# Values re-exported verbatim from the common_variables module so that other
# states/tools can read them from this project's outputs.
output "main_region" {
value = module.common_variables.main_region
}
output "external_network" {
value = module.common_variables.external_network
}
output "external_network_id" {
value = module.common_variables.external_network.id
}
output "floating_ip_pools" {
value = module.common_variables.floating_ip_pools
}
output "resolvers_ip" {
value = module.common_variables.resolvers_ip
}
output "mtu_size" {
value = module.common_variables.mtu_size
}
output "availability_zones_names" {
value = module.common_variables.availability_zones_names
}
output "availability_zone_no_gpu_name" {
value = module.common_variables.availability_zones_names.availability_zone_no_gpu
}
output "availability_zone_with_gpu_name" {
value = module.common_variables.availability_zones_names.availability_zone_with_gpu
}
output "ssh_sources" {
value = module.common_variables.ssh_sources
}
output "networks_with_d4s_services" {
value = module.common_variables.networks_with_d4s_services
}
# Base image references and the matching cloud-init user-data files.
output "ubuntu_1804" {
value = module.common_variables.ubuntu_1804
}
output "ubuntu_2204" {
value = module.common_variables.ubuntu_2204
}
output "centos_7" {
value = module.common_variables.centos_7
}
output "almalinux_9" {
value = module.common_variables.almalinux_9
}
output "ubuntu1804_data_file" {
value = module.common_variables.ubuntu1804_data_file
}
output "ubuntu2204_data_file" {
value = module.common_variables.ubuntu2204_data_file
}
output "el7_data_file" {
value = module.common_variables.el7_data_file
}
# Core shared-service descriptors (jump host, CA, monitoring, DB, balancer).
output "ssh_jump_proxy" {
value = module.common_variables.ssh_jump_proxy
}
output "internal_ca_data" {
value = module.common_variables.internal_ca_data
}
output "prometheus_server_data" {
value = module.common_variables.prometheus_server_data
}
output "shared_postgresql_server_data" {
value = module.common_variables.shared_postgresql_server_data
}
output "haproxy_l7_data" {
value = module.common_variables.haproxy_l7_data
}
output "resource_registry_addresses" {
value = module.common_variables.resource_registry_addresses
}
output "smartexecutor_addresses" {
value = module.common_variables.smartexecutor_addresses
}
# Added by Francesco
output "policy_list" {
value = module.common_variables.policy_list
}
# Added by Francesco
output "flavor_list" {
value = module.common_variables.flavor_list
}
# Added by Francesco
output "security_group_list" {
value = module.common_variables.security_group_list
}
# Added by Francesco
output "networks_list" {
value = module.common_variables.networks_list
}

View File

@ -0,0 +1,116 @@
#
# HAPROXY L7 behind the main Octavia balancer
#
# FIXME: terraform does not return the Octavia VRRP addresses, so we have to find them before creating the security group that allows the traffic between octavia and the haproxy instances
#
# openstack --os-cloud d4s-pre port list -f value | grep octavia-lb-vrrp
# 5cc2354e-4465-4a1d-8390-c214e208c6de octavia-lb-vrrp-72392023-a774-4b58-a025-c1e99c5d152a fa:16:3e:62:24:2c [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.34.232'}] ACTIVE
# 8aa4e97f-723d-4a2a-b79f-912fa7651653 octavia-lb-vrrp-fbfcf712-0ceb-4a38-82da-0c9ebef5dff3 fa:16:3e:79:62:a5 [{'subnet_id': 'cd77a2fd-4a36-4254-b1d0-70b3874c6d04', 'ip_address': '10.1.33.229'}] ACTIVE
#
# Server group
#
# Anti-affinity server group: keeps the HAPROXY L7 instances on distinct hypervisors.
resource "openstack_compute_servergroup_v2" "main_haproxy_l7" {
name = "main_haproxy_l7"
policies = ["anti-affinity"]
}
# Security group attached to the HAPROXY L7 instances; only the rules defined
# below apply (default egress/ingress rules are removed).
resource "openstack_networking_secgroup_v2" "main_lb_to_haproxy_l7" {
  name                 = "traffic_from_main_lb_to_haproxy_l7"
  delete_default_rules = "true"
  # FIX: description typo — "Traffic coming the main" -> "Traffic coming from the main"
  description = "Traffic coming from the main L4 lb directed to the haproxy l7 servers"
}
# Peer traffic between the two HAPROXY L7 instances on port 10000 (one rule per peer CIDR).
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_1_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 1 to l7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy_l7_2_peer" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Peer traffic from haproxy l7 2 to l7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 10000
port_range_max = 10000
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
# Traffic from the Octavia amphorae to the HAPROXY backends (ports 80/443/8880).
# The rules are opened to the whole main subnet because the Octavia VRRP
# addresses cannot be obtained from Terraform (see the FIXME at the top of the
# file); the per-VRRP-IP variants are kept commented out.
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_80" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 80"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_443" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 443"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "octavia_to_haproxy_l7_8880" {
security_group_id = openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.id
description = "Traffic from the octavia lb instance to HAPROXY l7 port 8880"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8880
port_range_max = 8880
# remote_ip_prefix = var.octavia_information.octavia_vrrp_ip_1
remote_ip_prefix = var.main_private_subnet.cidr
}
# Instances running the HAPROXY L7 services behind the Octavia L4 balancer.
# One VM per entry in var.main_haproxy_l7_ip, named <name>-01, <name>-02, ...
resource "openstack_compute_instance_v2" "main_haproxy_l7" {
  count                   = var.haproxy_l7_data.vm_count
  name                    = format("%s-%02d", var.haproxy_l7_data.name, count.index + 1)
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.haproxy_l7_data.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.main_lb_to_haproxy_l7.name]
  scheduler_hints {
    group = openstack_compute_servergroup_v2.main_haproxy_l7.id
  }
  # Boot from a volume created from the Ubuntu 22.04 image; the volume survives
  # instance deletion (delete_on_termination = false).
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }
  network {
    name = var.main_private_network.name
    # FIX: use direct list indexing instead of the legacy splat form
    # "var.main_haproxy_l7_ip.* [count.index]". The legacy ".*" attribute-splat
    # on a plain list is deprecated and the stray space obscured the intent;
    # the result is the same per-instance fixed IP.
    fixed_ip_v4 = var.main_haproxy_l7_ip[count.index]
  }
  # FIX: dropped the redundant "${...}" wrapper — file() takes the path directly.
  user_data = file(var.ubuntu2204_data_file)
  # Do not replace the instance when the ssh key, user data or network change.
  lifecycle {
    ignore_changes = [
      key_pair, user_data, network
    ]
  }
}

View File

@ -0,0 +1,30 @@
# Internal certification authority VM. Boots from a 10 GB volume created from
# the Ubuntu 22.04 image and gets a fixed IP on the main private network.
resource "openstack_compute_instance_v2" "internal_ca" {
name = var.internal_ca_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.internal_ca_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.basic_services_ip.ca
}
user_data = file("${var.ubuntu2204_data_file}")
# Do not replace the instance when the ssh key, user data or network change.
lifecycle {
ignore_changes = [
key_pair, user_data, network
]
}
}

View File

@ -0,0 +1,187 @@
# Main load balancer. L4, backed by Octavia (amphora provider), with a fixed
# VIP on the main private subnet.
resource "openstack_lb_loadbalancer_v2" "main_lb" {
vip_subnet_id = var.main_private_subnet_id
name = var.octavia_information.main_lb_name
description = var.octavia_information.main_lb_description
flavor_id = var.octavia_information.octavia_flavor_id
vip_address = var.basic_services_ip.octavia_main
# availability_zone = var.availability_zones_names.availability_zone_no_gpu
loadbalancer_provider = "amphora"
}
# Allocate a floating IP and bind it to the load balancer VIP port.
resource "openstack_networking_floatingip_v2" "main_lb_ip" {
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = var.octavia_information.main_lb_description
}
resource "openstack_networking_floatingip_associate_v2" "main_lb" {
floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
}
# A record mapping <main_lb_hostname>.<zone> to the balancer's floating IP
# (manual replacement for the broken DNS association above).
locals {
recordset_name = "${var.octavia_information.main_lb_hostname}.${var.dns_zone.zone_name}"
}
resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
zone_id = var.dns_zone_id
name = local.recordset_name
description = "Public IP address of the main load balancer"
# NOTE(review): ttl 8600 may be a typo for 3600 — confirm.
ttl = 8600
type = "A"
records = [openstack_networking_floatingip_v2.main_lb_ip.address]
}
# Main HAPROXY stats listener (TCP 8880), reachable only from the VPN CIDRs,
# balanced over the two HAPROXY backends with source-IP stickiness.
resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 8880
description = "Listener for the stats of the main HAPROXY instances"
name = "main_haproxy_stats_listener"
allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}
resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
protocol = "TCP"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-stats"
description = "Pool for the stats of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
}
resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 8880
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 8880
}
}
# Plain TCP health check of the stats port.
resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id
name = "main_haproxy_stats_monitor"
type = "TCP"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# Main HAPROXY HTTP chain: TCP listener on 80, PROXY-protocol-v2 pool towards
# the HAPROXY backends, HTTP health check on a dedicated path.
resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 80
description = "HTTP listener of the main HAPROXY instances"
name = "main_haproxy_http_listener"
admin_state_up = true
}
# PROXYV2 preserves the client address for the HAPROXY backends.
resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_http_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-http"
description = "Pool for the HTTP listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 80
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 80
}
}
resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id
name = "main_haproxy_http_monitor"
type = "HTTP"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# Main HAPROXY HTTPS chain: same layout as the HTTP chain, on port 443 with an
# HTTPS health monitor.
resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "main_haproxy_https_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
listener_id = openstack_lb_listener_v2.main_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "main-haproxy-lb-https"
description = "Pool for the HTTPS listener of the main HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
member {
name = "haproxy l7 1"
address = var.basic_services_ip.haproxy_l7_1
protocol_port = 443
}
member {
name = "haproxy l7 2"
address = var.basic_services_ip.haproxy_l7_2
protocol_port = 443
}
}
resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id
name = "main_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# Exposes the (private) VIP address of the Octavia balancer.
output "main_loadbalancer_ip" {
description = "Main Load balancer IP address"
value = openstack_lb_loadbalancer_v2.main_lb.vip_address
}

View File

@ -0,0 +1,126 @@
# Outputs of the labs_common_variables module: each one simply re-exports the
# variable of the same name so consuming configurations can read them via
# module.common_variables.<name>.
output "main_region" {
value = var.main_region
}
output "external_network" {
value = var.external_network
}
output "external_network_id" {
value = var.external_network.id
}
output "floating_ip_pools" {
value = var.floating_ip_pools
}
output "resolvers_ip" {
value = var.resolvers_ip
}
output "mtu_size" {
value = var.mtu_size
}
output "availability_zones_names" {
value = var.availability_zones_names
}
output "availability_zone_no_gpu_name" {
value = var.availability_zones_names.availability_zone_no_gpu
}
output "availability_zone_with_gpu_name" {
value = var.availability_zones_names.availability_zone_with_gpu
}
output "ssh_sources" {
value = var.ssh_sources
}
output "networks_with_d4s_services" {
value = var.networks_with_d4s_services
}
# Base image references and matching cloud-init user-data files.
output "ubuntu_1804" {
value = var.ubuntu_1804
}
output "ubuntu_2204" {
value = var.ubuntu_2204
}
output "centos_7" {
value = var.centos_7
}
output "almalinux_9" {
value = var.almalinux_9
}
output "ubuntu1804_data_file" {
value = var.ubuntu1804_data_file
}
output "ubuntu2204_data_file" {
value = var.ubuntu2204_data_file
}
output "el7_data_file" {
value = var.el7_data_file
}
# Core shared-service descriptors.
output "ssh_jump_proxy" {
value = var.ssh_jump_proxy
}
output "internal_ca_data" {
value = var.internal_ca_data
}
output "prometheus_server_data" {
value = var.prometheus_server_data
}
output "shared_postgresql_server_data" {
value = var.shared_postgresql_server_data
}
output "haproxy_l7_data" {
value = var.haproxy_l7_data
}
output "resource_registry_addresses" {
value = var.resource_registry_addresses
}
output "smartexecutor_addresses" {
value = var.smartexecutor_addresses
}
# Added by Francesco
output "policy_list" {
value = var.policy_list
}
# Added by Francesco
output "flavor_list" {
value = var.flavor_list
}
# Added by Francesco
output "security_group_list" {
value = var.security_group_list
}
# Added by Francesco
output "networks_list" {
value = var.networks_list
}
# output "default_security_group_name" {
# value = var.default_security_group_name
# }

View File

@ -0,0 +1,96 @@
# PostgreSQL shared server
# Dedicated, non-routed network that clients join to reach the shared
# PostgreSQL service.
resource "openstack_networking_network_v2" "shared_postgresql_net" {
name = var.shared_postgresql_server_data.network_name
admin_state_up = "true"
external = "false"
description = var.shared_postgresql_server_data.network_description
dns_domain = var.dns_zone.zone_name
mtu = var.mtu_size
port_security_enabled = true
shared = false
region = var.main_region
}
# Subnet: DHCP enabled, no default gateway (the network is isolated on purpose).
resource "openstack_networking_subnet_v2" "shared_postgresql_subnet" {
name = "shared-postgresql-subnet"
description = "subnet used to connect to the shared PostgreSQL service"
network_id = openstack_networking_network_v2.shared_postgresql_net.id
cidr = var.shared_postgresql_server_data.network_cidr
dns_nameservers = var.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = var.shared_postgresql_server_data.allocation_pool_start
end = var.shared_postgresql_server_data.allocation_pool_end
}
}
# Security group holding the PostgreSQL access rule (default rules removed).
resource "openstack_networking_secgroup_v2" "shared_postgresql_access" {
name = "access_to_the_shared_postgresql_service"
delete_default_rules = "true"
description = "Access the shared PostgreSQL service using the dedicated network"
}
# Allow PostgreSQL (5432/tcp) only from the dedicated client network.
resource "openstack_networking_secgroup_rule_v2" "shared_postgresql_access_from_dedicated_subnet" {
  security_group_id = openstack_networking_secgroup_v2.shared_postgresql_access.id
  # FIX: the description hard-coded "192.168.2.0/22" while the rule actually
  # uses var.shared_postgresql_server_data.network_cidr; the stale constant was
  # misleading whenever the variable differs.
  description      = "Allow connections to port 5432 from the shared PostgreSQL dedicated network"
  direction        = "ingress"
  ethertype        = "IPv4"
  protocol         = "tcp"
  port_range_min   = 5432
  port_range_max   = 5432
  remote_ip_prefix = var.shared_postgresql_server_data.network_cidr
}
# Persistent data volume for the PostgreSQL server.
resource "openstack_blockstorage_volume_v3" "shared_postgresql_data_vol" {
name = var.shared_postgresql_server_data.vol_data_name
size = var.shared_postgresql_server_data.vol_data_size
}
# Instance: attached to both the main private network and the dedicated
# PostgreSQL network (fixed service IP on the latter).
resource "openstack_compute_instance_v2" "shared_postgresql_server" {
name = var.shared_postgresql_server_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.shared_postgresql_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.shared_postgresql_access.name]
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.shared_postgresql_server_data.network_name
fixed_ip_v4 = var.shared_postgresql_server_data.server_ip
}
user_data = file("${var.ubuntu2204_data_file}")
# Do not replace the instance when the ssh key, user data or network change.
lifecycle {
ignore_changes = [
key_pair, user_data, network
]
}
}
# Attach the data volume at the configured device path.
resource "openstack_compute_volume_attach_v2" "shared_postgresql_data_attach_vol" {
instance_id = openstack_compute_instance_v2.shared_postgresql_server.id
volume_id = openstack_blockstorage_volume_v3.shared_postgresql_data_vol.id
device = var.shared_postgresql_server_data.vol_data_device
depends_on = [openstack_compute_instance_v2.shared_postgresql_server]
}

View File

@ -0,0 +1,77 @@
# Prometheus server. A floating IP is required
# Persistent data volume for the Prometheus TSDB.
resource "openstack_blockstorage_volume_v3" "prometheus_data_vol" {
name = var.prometheus_server_data.vol_data_name
size = var.prometheus_server_data.vol_data_size
}
# Prometheus VM: web access restricted via the restricted_web group, plus a
# dedicated group allowing scraping from Grafana.
resource "openstack_compute_instance_v2" "prometheus_server" {
name = var.prometheus_server_data.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.prometheus_server_data.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.restricted_web.name, openstack_networking_secgroup_v2.prometheus_access_from_grafana.name]
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.basic_services_ip.prometheus
}
user_data = file("${var.ubuntu2204_data_file}")
# Do not replace the instance when the ssh key, user data or network change.
lifecycle {
ignore_changes = [
key_pair, user_data, network
]
}
}
# Attach the data volume at the configured device path.
resource "openstack_compute_volume_attach_v2" "prometheus_data_attach_vol" {
instance_id = openstack_compute_instance_v2.prometheus_server.id
volume_id = openstack_blockstorage_volume_v3.prometheus_data_vol.id
device = var.prometheus_server_data.vol_data_device
}
# Floating IP and DNS record
resource "openstack_networking_floatingip_v2" "prometheus_server_ip" {
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
description = "Prometheus server"
}
# NOTE(review): openstack_compute_floatingip_associate_v2 is deprecated in the
# OpenStack provider in favour of openstack_networking_floatingip_associate_v2
# (used elsewhere in this repo) — consider migrating; verify state impact first.
resource "openstack_compute_floatingip_associate_v2" "prometheus_server" {
floating_ip = openstack_networking_floatingip_v2.prometheus_server_ip.address
instance_id = openstack_compute_instance_v2.prometheus_server.id
}
locals {
prometheus_recordset_name = "${var.prometheus_server_data.name}.${var.dns_zone.zone_name}"
alertmanager_recordset_name = "alertmanager.${var.dns_zone.zone_name}"
}
# A record for the Prometheus server's public address.
resource "openstack_dns_recordset_v2" "prometheus_server_recordset" {
zone_id = var.dns_zone_id
name = local.prometheus_recordset_name
description = "Public IP address of the Prometheus server"
ttl = 8600
type = "A"
records = [openstack_networking_floatingip_v2.prometheus_server_ip.address]
}
# Alertmanager is served from the same host: CNAME to the Prometheus record.
resource "openstack_dns_recordset_v2" "alertmanager_server_recordset" {
zone_id = var.dns_zone_id
name = local.alertmanager_recordset_name
description = "Prometheus alertmanager"
ttl = 8600
type = "CNAME"
records = [local.prometheus_recordset_name]
}

View File

@ -0,0 +1,373 @@
#
# This is the security group that should be added to every instance.
# It allows all IPv4 egress, ICMP, SSH from the jump proxy only, and
# Prometheus node-exporter scraping.
resource "openstack_networking_secgroup_v2" "default" {
name = var.default_security_group_name
delete_default_rules = "true"
description = "Default security group with rules for ssh access via jump proxy, prometheus scraping"
}
resource "openstack_networking_secgroup_rule_v2" "egress-ipv4" {
security_group_id = openstack_networking_secgroup_v2.default.id
direction = "egress"
ethertype = "IPv4"
}
resource "openstack_networking_secgroup_rule_v2" "ingress-icmp" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Allow ICMP from remote"
direction = "ingress"
ethertype = "IPv4"
remote_ip_prefix = "0.0.0.0/0"
protocol = "icmp"
}
resource "openstack_networking_secgroup_rule_v2" "ssh-jump-proxy" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "SSH traffic from the jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "prometheus-node" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Prometheus access to the node exporter"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 9100
port_range_max = 9100
remote_ip_prefix = var.basic_services_ip.prometheus_cidr
}
#
# SSH access to the jump proxy. Used by the jump proxy VM only.
# One rule per allowed source CIDR (VPNs, shell host, InfraScience network).
resource "openstack_networking_secgroup_v2" "access_to_the_jump_proxy" {
name = "ssh_access_to_the_jump_node"
delete_default_rules = "true"
description = "Security group that allows SSH access to the jump node from a limited set of sources"
}
resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-1" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-s2i2s-vpn-2" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from S2I2S VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.s2i2s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-1" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.d4s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-d4s-vpn-2" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from D4Science VPN 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.d4s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-shell-d4s" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from shell.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.shell_d4s_cidr
}
resource "openstack_networking_secgroup_rule_v2" "ssh-infrascience-net" {
security_group_id = openstack_networking_secgroup_v2.access_to_the_jump_proxy.id
description = "SSH traffic from the InfraScience network"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 22
port_range_max = 22
remote_ip_prefix = var.ssh_sources.infrascience_net_cidr
}
# Debug via tunnel from the jump proxy node: opens 8100/80/443 to the jump
# host only, for ssh-tunnelled web app debugging.
resource "openstack_networking_secgroup_v2" "debugging" {
name = "debugging_from_jump_node"
delete_default_rules = "true"
description = "Security group that allows web app debugging via tunnel from the ssh jump node"
}
resource "openstack_networking_secgroup_rule_v2" "shell_8100" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "Tomcat debug on port 8100 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8100
port_range_max = 8100
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "shell_80" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "http debug port 80 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
resource "openstack_networking_secgroup_rule_v2" "shell_443" {
security_group_id = openstack_networking_secgroup_v2.debugging.id
description = "https debug port 443 from the shell jump proxy"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.ssh_jump_cidr
}
# Traffic from the main HAPROXY load balancers.
# Use on the web services that are exposed through the main HAPROXY:
# ports 80/443/8080/8888 are opened separately for each of the two HAPROXY
# instances' CIDRs.
resource "openstack_networking_secgroup_v2" "traffic_from_main_haproxy" {
name = "traffic_from_the_main_load_balancers"
delete_default_rules = "true"
description = "Allow traffic from the main L7 HAPROXY load balancers"
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-80" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-80" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-443" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-443" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTPS traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8080" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8080" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8080
port_range_max = 8080
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-1-8888" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 1"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = var.basic_services_ip.haproxy_l7_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "haproxy-l7-2-8888" {
security_group_id = openstack_networking_secgroup_v2.traffic_from_main_haproxy.id
description = "HTTP traffic from HAPROXY L7 2"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 8888
port_range_max = 8888
remote_ip_prefix = var.basic_services_ip.haproxy_l7_2_cidr
}
# Security group that exposes web services directly. A floating IP is required.
# Opens 80/443 to the whole Internet, for VMs not behind any load balancer.
resource "openstack_networking_secgroup_v2" "public_web" {
  name                 = "public_web_service"
  delete_default_rules = "true"
  description          = "Security group that allows HTTPS and HTTP from everywhere, for the services that are not behind any load balancer"
}
resource "openstack_networking_secgroup_rule_v2" "public_http" {
  security_group_id = openstack_networking_secgroup_v2.public_web.id
  description       = "Allow HTTP from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
}
resource "openstack_networking_secgroup_rule_v2" "public_https" {
  security_group_id = openstack_networking_secgroup_v2.public_web.id
  description       = "Allow HTTPS from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = "0.0.0.0/0"
}
# HTTP and HTTPS access through the VPN nodes. Floating IP is required.
# HTTPS (443) is restricted to the VPN endpoints and shell.d4science.org;
# HTTP (80) stays world-open because the Let's Encrypt HTTP-01 challenge
# must be reachable from anywhere.
resource "openstack_networking_secgroup_v2" "restricted_web" {
  name                 = "restricted_web_service"
  delete_default_rules = "true"
  description          = "Security group that restricts HTTPS sources to the VPN nodes and shell.d4science.org. HTTP is open to all, because letsencrypt"
}
resource "openstack_networking_secgroup_rule_v2" "http_from_everywhere" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTP from everywhere"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
}
resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_1" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from D4Science VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_d4s_vpn_2" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from D4Science VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.d4s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_1" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from S2I2S VPN 1"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_1_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_s2i2s_vpn_2" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from S2I2S VPN 2"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.s2i2s_vpn_2_cidr
}
resource "openstack_networking_secgroup_rule_v2" "https_from_shell_d4s" {
  security_group_id = openstack_networking_secgroup_v2.restricted_web.id
  description       = "Allow HTTPS from shell.d4science.org"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = var.ssh_sources.shell_d4s_cidr
}
resource "openstack_networking_secgroup_v2" "prometheus_access_from_grafana" {
name = "prometheus_access_from_grafana"
delete_default_rules = "true"
description = "The public grafana server must be able to get data from Prometheus"
}
resource "openstack_networking_secgroup_rule_v2" "grafana_d4s" {
security_group_id = openstack_networking_secgroup_v2.prometheus_access_from_grafana.id
description = "Allow HTTPS from grafana.d4science.org"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = var.prometheus_server_data.public_grafana_server_cidr
}

View File

@ -0,0 +1,56 @@
# VM used as SSH jump proxy. A floating IP is required (created below).
resource "openstack_compute_instance_v2" "ssh_jump_proxy" {
  name                    = var.ssh_jump_proxy.name
  availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
  flavor_name             = var.ssh_jump_proxy.flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [var.default_security_group_name, openstack_networking_secgroup_v2.access_to_the_jump_proxy.name]
  # Boot disk, created from the Ubuntu 22.04 image and kept after a destroy
  block_device {
    uuid                  = var.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 30
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }
  network {
    name        = var.main_private_network.name
    fixed_ip_v4 = var.basic_services_ip.ssh_jump
  }
  # file() takes the path directly; the "${...}" wrapper was redundant
  user_data = file(var.ubuntu2204_data_file)
  lifecycle {
    # Do not replace the instance when the ssh key, the cloud-init
    # script or the network ports change.
    ignore_changes = [
      key_pair, user_data, network
    ]
  }
}
# Floating IP and DNS record for the jump proxy
resource "openstack_networking_floatingip_v2" "ssh_jump_proxy_ip" {
  pool = var.floating_ip_pools.main_public_ip_pool
  # The DNS association does not work because of a bug in the OpenStack API
  description = "SSH Proxy Jump Server"
}
# Bind the floating IP to the jump proxy instance
resource "openstack_compute_floatingip_associate_v2" "ssh_jump_proxy" {
  floating_ip = openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address
  instance_id = openstack_compute_instance_v2.ssh_jump_proxy.id
}
locals {
  # FQDN of the jump proxy inside the project DNS zone
  ssh_recordset_name = "${var.ssh_jump_proxy.name}.${var.dns_zone.zone_name}"
}
# A record pointing the jump proxy FQDN at its floating IP
resource "openstack_dns_recordset_v2" "ssh_jump_proxy_recordset" {
  zone_id     = var.dns_zone_id
  name        = local.ssh_recordset_name
  description = "Public IP address of the SSH Proxy Jump server"
  ttl         = 8600
  type        = "A"
  records     = [openstack_networking_floatingip_v2.ssh_jump_proxy_ip.address]
}

View File

@ -0,0 +1,27 @@
# Define required providers
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.53.0"
    }
  }
}
# Read the state of the project-setup stack (network, router, DNS zone)
# so its outputs can be referenced from this stack.
data "terraform_remote_state" "privnet_dns_router" {
  backend = "local"
  config = {
    path = "../project-setup/terraform.tfstate"
  }
}
# module "common_variables" {
#   source = "../../modules/common_variables"
# }
# module "ssh_settings" {
#   source = "../../modules/ssh-key-ref"
# }

View File

@ -0,0 +1,276 @@
# Global definitions
# OpenStack region used by all the resources of the project
variable "main_region" {
  type    = string
  default = "isti_area_pi_1"
}
# Provider (external) network: name and its Neutron UUID
variable "external_network" {
  type = map(string)
  default = {
    name = "external-network"
    id   = "1d2ff137-6ff7-4017-be2b-0d6c4af2353b"
  }
}
# Pools from which floating IPs are allocated
variable "floating_ip_pools" {
  type = map(string)
  default = {
    main_public_ip_pool = "external-network"
  }
}
# DNS resolvers assigned to the subnets
variable "resolvers_ip" {
  type    = list(string)
  default = ["146.48.29.97", "146.48.29.98", "146.48.29.99"]
}
# Jumbo-frame MTU used on the private networks
variable "mtu_size" {
  type    = number
  default = 8942
}
# Nova availability zones: one generic, one with GPU nodes
variable "availability_zones_names" {
  type = map(string)
  default = {
    availability_zone_no_gpu   = "cnr-isti-nova-a"
    availability_zone_with_gpu = "cnr-isti-nova-gpu-a"
  }
}
# Glance images: name, UUID and (where present) the cloud-init script path
variable "ubuntu_1804" {
  type = map(string)
  default = {
    name           = "Ubuntu-Bionic-18.04"
    uuid           = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89"
    user_data_file = "../../openstack_vm_data_scripts/ubuntu1804.sh"
  }
}
variable "ubuntu_2204" {
  type = map(string)
  default = {
    name           = "Ubuntu-Jammy-22.04"
    uuid           = "54768889-8556-4be4-a2eb-82a4d9b34627"
    user_data_file = "../../openstack_vm_data_scripts/ubuntu2204.sh"
  }
}
variable "centos_7" {
  type = map(string)
  default = {
    name = "CentOS-7"
    uuid = "f0187a99-64f6-462a-ab5f-ef52fe62f2ca"
  }
}
variable "almalinux_9" {
  type = map(string)
  default = {
    name = "AlmaLinux-9.0-20220718"
    uuid = "541650fc-dd19-4f38-bb1d-7333ed9dd688"
  }
}
# Standalone paths to the cloud-init scripts (duplicated in the image maps
# above; kept for the resources that reference them directly)
variable "ubuntu1804_data_file" {
  default = "../../openstack_vm_data_scripts/ubuntu1804.sh"
}
variable "ubuntu2204_data_file" {
  default = "../../openstack_vm_data_scripts/ubuntu2204.sh"
}
variable "el7_data_file" {
  default = "../../openstack_vm_data_scripts/el7.sh"
}
variable "ssh_jump_proxy" {
type = map(string)
default = {
name = "ssh-jump-proxy"
flavor = "m2.small"
}
}
variable "internal_ca_data" {
type = map(string)
default = {
name = "ca"
flavor = "m1.small"
}
}
variable "prometheus_server_data" {
type = map(string)
default = {
name = "prometheus"
flavor = "m1.medium"
vol_data_name = "prometheus-data"
vol_data_size = "100"
vol_data_device = "/dev/vdb"
public_grafana_server_cidr = "146.48.122.132/32"
}
}
variable "shared_postgresql_server_data" {
type = map(string)
default = {
name = "shared-postgresql-server"
flavor = "m1.medium"
vol_data_name = "shared-postgresql-data"
vol_data_size = "100"
vol_data_device = "/dev/vdb"
# vol_backup_name = ""
# vol_backup_size = ""
# vol_backup_device = ""
network_name = "postgresql-srv-net"
network_description = "Network used to communicate with the shared postgresql service"
network_cidr = "192.168.0.0/22"
allocation_pool_start = "192.168.0.100"
allocation_pool_end = "192.168.3.254"
server_ip = "192.168.0.5"
server_cidr = "192.168.0.5/22"
}
}
variable "haproxy_l7_data" {
type = map(string)
default = {
name = "main-haproxy-l7"
haproxy_1 = "haproxy-l7-1"
haproxy_2 = "haproxy-l7-2"
flavor = "m1.medium"
vm_count = "2"
}
}
variable "resource_registry_addresses" {
type = map(string)
default = {
}
}
variable "smartexecutor_addresses" {
type = map(string)
default = {
}
}
# Added by Francesco
# Create in the path 'modules/ssh-key-ref' the file 'ssh-key-ref-outputs.tf'
# with the following outputs:
# output "ssh_key_file" {
# value = "~/.ssh/{YOUR_PRIVATE_KEYNAME}"
# sensitive = true
# }
# output "ssh_key_name" {
# value = "{YOUR_KEYNAME}"
# sensitive = false
# }
# Then you can use above outputs in your 'file.tf' (if it contains the soft link to variables.tf) as:
# module.ssh_settings.ssh_key_file
# module.ssh_settings.ssh_key_name
# e.g.
#
# resource "openstack_compute_instance_v2" "geoserver" {
# key_pair = module.ssh_settings.ssh_key_name
# Added by Francesco
variable "flavor_list" {
type = map(string)
default = {
c1_small = "c1.small" #RAM 2 - VCPUs 2
c1_medium = "c1.medium" #RAM 4 - VCPUs 4
c1_large = "c1.large" #RAM 8 - VCPUs 8
c2_large = "c2.large" #RAM 16 -VCPUs 16
m1_medium = "m1.medium" #RAM 4 - VCPUs 2
m1_large = "m1.large" #RAM 8 - VCPUs 4
m1_xlarge = "m1.xlarge" #RAM 16 - VCPUs 8
m1_xxl = "m1.xxl" #RAM 32 - VCPUS 16
m2_small = "m2.small" #RAM 8 - VCPUs 2
m2_medium = "m2.medium" #RAM 16 - VCPUs 4
m2_large = "m2.large" #RAM 32 - VCPUs 8
m3_large = "m3.large" #RAM 64 - VCPUs 16
}
}
# Added by Francesco
# Map of the security group names as they exist in the OpenStack project.
# NOTE: HCL forbids duplicate keys in an object literal, so the repeated
# "limited_SSH_access" entry was removed (both copies had the same value);
# the hyphenated key must be quoted to be a valid HCL2 object key.
variable "security_group_list" {
  type = map(string)
  default = {
    postgreSQL                             = "PostgreSQL service"
    acaland                                = "acaland's dev machine"
    haproxy                                = "traffic_from_main_lb_to_haproxy_l7"
    access_to_orientdb                     = "access_to_orientdb"
    "dataminer-publish"                    = "dataminer-publish"
    docker_swarm_NFS                       = "Docker Swarm NFS"
    public_HTTPS                           = "Public HTTPS"
    orientdb_internal_docker_traffic       = "orientdb_internal_docker_traffic"
    limited_SSH_access                     = "Limited SSH access"
    debugging_from_jump_node               = "debugging_from_jump_node"
    access_to_the_timescaledb_service      = "access_to_the_timescaledb_service"
    docker_swarm                           = "Docker Swarm"
    http_and_https_from_the_load_balancers = "traffic_from_the_main_load_balancers"
    limited_HTTPS_access                   = "restricted_web_service"
    mongo                                  = "mongo"
    default                                = "default"
    cassandra                              = "Cassandra"
    access_to_orientdb_se                  = "access_to_orientdb_se"
  }
}
# Added by Francesco
# Nova server-group scheduling policies, keyed by a snake_case alias
variable "policy_list" {
  type = map(string)
  default = {
    soft_anti_affinity = "soft-anti-affinity"
    anti_affinity      = "anti-affinity"
    affinity           = "affinity"
    soft_affinity      = "soft-affinity"
  }
}
variable "networks_list" {
type = map(string)
default = {
shared_postgresql = "postgresql-srv-net"
swarm = "swarm-nfs-net"
timescaledb = "timescaledb-net"
orientdb = "orientdb-net"
orientdb_se = "orientdb-se-net"
}
}
variable "ssh_sources" {
type = map(string)
default = {
s2i2s_vpn_1_cidr = "146.48.28.10/32"
s2i2s_vpn_2_cidr = "146.48.28.11/32"
d4s_vpn_1_cidr = "146.48.122.27/32"
d4s_vpn_2_cidr = "146.48.122.49/32"
shell_d4s_cidr = "146.48.122.95/32"
infrascience_net_cidr = "146.48.122.0/23"
}
}
variable "networks_with_d4s_services" {
type = map(string)
default = {
"isti_net" = "146.48.80.0/21"
"s2i2s_net" = "146.48.28.0/22"
"infrascience_net" = "146.48.122.0/23"
"garr_ct1_net" = "90.147.166.0/23"
"garr_pa1_net" = "90.147.188.0/23"
"garr_na_net" = "90.147.152.0/24"
}
}
# variable "default_security_group_name" {
# default = "default_for_all"
# }

View File

@ -0,0 +1,15 @@
# Module used
# NOTE(review): ssh_settings is declared but not referenced by the resource
# below — presumably kept for symmetry with the other modules; verify.
module "ssh_settings" {
  source = "../../modules/ssh-key-ref"
}
# Creates one DNS recordset per entry of var.dns_resources_map.
# The variable's default entry is empty: override it to get a useful plan.
resource "openstack_dns_recordset_v2" "add_dns_recordset" {
  for_each    = var.dns_resources_map
  zone_id     = each.value.zone_id
  name        = each.value.name
  description = each.value.description
  ttl         = each.value.ttl
  type        = each.value.type
  records     = each.value.records
}

View File

@ -0,0 +1,4 @@
output "dns_resources_map" {
value = var.dns_resources_map
}

View File

@ -0,0 +1,11 @@
# Define required providers
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.53.0"
    }
  }
}

View File

@ -0,0 +1,23 @@
# Default DNS resource is EMPTY. Override it to create a proper plan.
# Each entry describes one recordset: the target zone, the record name,
# TTL, record type (A/CNAME/...) and the list of record values.
variable "dns_resources_map" {
  type = map(object({
    zone_id     = string
    name        = string
    description = string
    ttl         = number
    type        = string
    records     = list(string)
  }))
  default = {
    dns_record = {
      zone_id     = ""
      name        = "",
      description = "",
      ttl         = 8600,
      type        = "CNAME",
      records     = []
    }
  }
}

View File

@ -0,0 +1,532 @@
#
# Server groups for both the masters and the workers
#
resource "openstack_compute_servergroup_v2" "swarm_masters" {
name = "swarm_masters"
policies = ["anti-affinity"]
}
resource "openstack_compute_servergroup_v2" "swarm_workers" {
name = "swarm_workers"
policies = ["soft-anti-affinity"]
}
#
# Network for the NFS traffic
#
resource "openstack_networking_network_v2" "swarm_nfs_net" {
name = var.swarm_nfs_private_network.network_name
admin_state_up = "true"
external = "false"
description = var.swarm_nfs_private_network.network_description
dns_domain = var.dns_zone.zone_name
mtu = var.mtu_size
port_security_enabled = true
shared = false
region = var.main_region
}
# Subnet
resource "openstack_networking_subnet_v2" "swarm_nfs_subnet" {
name = "swarm-nfs-net"
description = "Subnet used by the Swarm cluster and the NFS service"
network_id = openstack_networking_network_v2.swarm_nfs_net.id
cidr = var.swarm_nfs_private_network.network_cidr
dns_nameservers = var.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = var.swarm_nfs_private_network.allocation_pool_start
end = var.swarm_nfs_private_network.allocation_pool_end
}
}
#
# Security groups
#
resource "openstack_networking_secgroup_v2" "swarm_internal_traffic" {
name = "swarm_internal_docker_traffic"
delete_default_rules = "true"
description = "Traffic between the Docker Swarm nodes"
}
resource "openstack_networking_secgroup_rule_v2" "everything_udp" {
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "UDP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_rule_v2" "everything_tcp" {
security_group_id = openstack_networking_secgroup_v2.swarm_internal_traffic.id
description = "TCP traffic between Swarm nodes"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.main_private_subnet.cidr
}
resource "openstack_networking_secgroup_v2" "swarm_nfs_traffic" {
name = "docker_swarm_nfs"
delete_default_rules = "true"
description = "Traffic between Docker Swarm and the NFS service"
}
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_udp" {
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "UDP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "udp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
}
resource "openstack_networking_secgroup_rule_v2" "swarm_nfs_tcp" {
security_group_id = openstack_networking_secgroup_v2.swarm_nfs_traffic.id
description = "TCP traffic"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = var.swarm_nfs_private_network.network_cidr
}
#
# Swarm Manager VMs
#
# Instance
resource "openstack_compute_instance_v2" "docker_swarm_managers" {
count = var.docker_swarm_data.mgr_count
name = format("%s-%02d", var.docker_swarm_data.mgr_name, count.index + 1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.mgr_flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.swarm_masters.id
}
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
block_device {
source_type = "blank"
volume_size = var.docker_swarm_data.mgr_data_disk_size
boot_index = -1
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.swarm_managers_ip.* [count.index]
}
network {
name = var.swarm_nfs_private_network.network_name
}
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Swarm worker nodes
resource "openstack_compute_instance_v2" "docker_swarm_workers" {
count = var.docker_swarm_data.worker_count
name = format("%s-%02d", var.docker_swarm_data.worker_name, count.index + 1)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.worker_flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = ["default", var.default_security_group_name, openstack_networking_secgroup_v2.swarm_internal_traffic.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.swarm_workers.id
}
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
block_device {
source_type = "blank"
volume_size = var.docker_swarm_data.worker_data_disk_size
boot_index = -1
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.swarm_nfs_private_network.network_name
}
network {
name = var.networks_list.shared_postgresql
}
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# NFS server
# Block device
resource "openstack_blockstorage_volume_v3" "swarm_nfs_data_vol" {
name = var.docker_swarm_data.nfs_server_data_disk_name
size = var.docker_swarm_data.nfs_server_data_disk_size
}
# Instance
resource "openstack_compute_instance_v2" "swarm_nfs_server" {
name = var.docker_swarm_data.nfs_server_name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.docker_swarm_data.nfs_server_flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [openstack_networking_secgroup_v2.default.name, openstack_networking_secgroup_v2.swarm_nfs_traffic.name]
block_device {
uuid = var.ubuntu_2204.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.swarm_nfs_private_network.network_name
fixed_ip_v4 = var.swarm_nfs_private_network.server_ip
}
user_data = file("${var.ubuntu2204_data_file}")
depends_on = [openstack_networking_subnet_v2.swarm_nfs_subnet]
}
# Attach the additional volume
resource "openstack_compute_volume_attach_v2" "swarm_nfs_data_attach_vol" {
instance_id = openstack_compute_instance_v2.swarm_nfs_server.id
volume_id = openstack_blockstorage_volume_v3.swarm_nfs_data_vol.id
device = var.docker_swarm_data.nfs_server_data_disk_device
depends_on = [openstack_compute_instance_v2.swarm_nfs_server]
}
#
# Octavia
#
# Swarm load balancer. L4, backed by Octavia
resource "openstack_lb_loadbalancer_v2" "swarm_lb" {
vip_subnet_id = var.main_private_subnet_id
name = var.octavia_swarm_data.swarm_lb_name
description = var.octavia_swarm_data.swarm_lb_description
flavor_id = var.octavia_swarm_data.octavia_flavor_id
vip_address = var.octavia_swarm_data.swarm_octavia_main_ip
# availability_zone = var.availability_zones_names.availability_zone_no_gpu
loadbalancer_provider = "amphora"
}
# Allocate a floating IP
resource "openstack_networking_floatingip_v2" "swarm_lb_ip" {
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = var.octavia_swarm_data.swarm_lb_description
}
resource "openstack_networking_floatingip_associate_v2" "swarm_lb" {
floating_ip = openstack_networking_floatingip_v2.swarm_lb_ip.address
port_id = openstack_lb_loadbalancer_v2.swarm_lb.vip_port_id
}
locals {
swarm_recordset_name = "${var.octavia_swarm_data.swarm_lb_hostname}.${var.dns_zone.zone_name}"
portainer_recordset_name = "portainer.${var.dns_zone.zone_name}"
ccp_recordset_name = "ccp.${var.dns_zone.zone_name}"
cdn_recordset_name = "cdn.${var.dns_zone.zone_name}"
conductor_recordset_name = "conductor.${var.dns_zone.zone_name}"
}
resource "openstack_dns_recordset_v2" "swarm_lb_dns_recordset" {
zone_id = var.dns_zone_id
name = local.swarm_recordset_name
description = "Public IP address of the load balancer in front of Docker Swarm"
ttl = 8600
type = "A"
records = [openstack_networking_floatingip_v2.swarm_lb_ip.address]
}
resource "openstack_dns_recordset_v2" "swarm_portainer_dns_recordset" {
zone_id = var.dns_zone_id
name = local.portainer_recordset_name
description = "Portainer hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "ccp_dns_recordset" {
zone_id = var.dns_zone_id
name = local.ccp_recordset_name
description = "CCP hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "cdn_dns_recordset" {
zone_id = var.dns_zone_id
name = local.cdn_recordset_name
description = "CDN hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
resource "openstack_dns_recordset_v2" "conductor_dns_recordset" {
zone_id = var.dns_zone_id
name = local.conductor_recordset_name
description = "Conductor hostname"
ttl = 8600
type = "CNAME"
records = [local.swarm_recordset_name]
}
# Main HAPROXY stats listener (port 8880), reachable only from the VPN CIDRs
resource "openstack_lb_listener_v2" "swarm_haproxy_stats_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8880
  description     = "Listener for the stats of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_stats_listener"
  allowed_cidrs   = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
}
# Pool with source-IP session persistence over the three manager HAProxies
resource "openstack_lb_pool_v2" "swarm_haproxy_stats_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_stats_listener.id
  protocol    = "TCP"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-stats"
  description = "Pool for the stats of the main HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
}
resource "openstack_lb_members_v2" "swarm_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8880
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8880
  }
}
# Plain TCP health monitor for the stats pool
resource "openstack_lb_monitor_v2" "swarm_haproxy_stats_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_stats_pool.id
  name           = "swarm_haproxy_stats_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# HAPROXY HTTP (port 80): TCP listener, PROXY-protocol-v2 pool towards the
# three manager HAProxies, HTTP health monitor on the HAProxy check path.
resource "openstack_lb_listener_v2" "swarm_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 80
  description     = "HTTP listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_http_listener"
  admin_state_up  = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_http_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_http_listener.id
  # PROXYV2 forwards the client address to the HAProxy backends
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http"
  description = "Pool for the HTTP listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 80
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 80
  }
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_http_pool.id
  name           = "swarm_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# HAPROXY HTTPS
resource "openstack_lb_listener_v2" "swarm_haproxy_https_listener" {
loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
protocol = "TCP"
protocol_port = 443
description = "HTTPS listener of the main HAPROXY instances"
name = "swarm_haproxy_https_listener"
admin_state_up = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_https_pool" {
listener_id = openstack_lb_listener_v2.swarm_haproxy_https_listener.id
protocol = "PROXYV2"
lb_method = "LEAST_CONNECTIONS"
name = "swarm-haproxy-lb-https"
description = "Pool for the HTTPS listener of the Docker Swarm HAPROXY instances"
persistence {
type = "SOURCE_IP"
}
admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_https_pool_members" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
member {
name = "swarm mgr haproxy 1"
address = var.docker_swarm_data.mgr1_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 2"
address = var.docker_swarm_data.mgr2_ip
protocol_port = 443
}
member {
name = "swarm mgr haproxy 3"
address = var.docker_swarm_data.mgr3_ip
protocol_port = 443
}
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_https_monitor" {
pool_id = openstack_lb_pool_v2.swarm_haproxy_https_pool.id
name = "swarm_haproxy_https_monitor"
type = "HTTPS"
http_method = "GET"
url_path = "/_haproxy_health_check"
expected_codes = "200"
delay = 20
timeout = 5
max_retries = 3
admin_state_up = true
}
# HAPROXY HTTP on port 8080: same layout as the port 80 set
resource "openstack_lb_listener_v2" "swarm_haproxy_8080_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.swarm_lb.id
  protocol        = "TCP"
  protocol_port   = 8080
  description     = "HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  name            = "swarm_haproxy_8080_listener"
  admin_state_up  = true
}
resource "openstack_lb_pool_v2" "swarm_haproxy_8080_pool" {
  listener_id = openstack_lb_listener_v2.swarm_haproxy_8080_listener.id
  # PROXYV2 forwards the client address to the HAProxy backends
  protocol    = "PROXYV2"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "swarm-haproxy-lb-http-8080"
  description = "Pool for the HTTP port 8080 listener of the Docker Swarm HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  admin_state_up = true
}
resource "openstack_lb_members_v2" "swarm_haproxy_8080_pool_members" {
  pool_id = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  member {
    name          = "swarm mgr haproxy 1"
    address       = var.docker_swarm_data.mgr1_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 2"
    address       = var.docker_swarm_data.mgr2_ip
    protocol_port = 8080
  }
  member {
    name          = "swarm mgr haproxy 3"
    address       = var.docker_swarm_data.mgr3_ip
    protocol_port = 8080
  }
}
resource "openstack_lb_monitor_v2" "swarm_haproxy_8080_monitor" {
  pool_id        = openstack_lb_pool_v2.swarm_haproxy_8080_pool.id
  name           = "swarm_haproxy_8080_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Public VIP of the Swarm load balancer
output "swarm_loadbalancer_ip" {
  description = "Docker Swarm Load balancer IP address"
  value       = openstack_lb_loadbalancer_v2.swarm_lb.vip_address
}

View File

@ -0,0 +1,57 @@
variable "docker_swarm_data" {
type = map(string)
default = {
mgr_name = "swarm-mgr"
mgr1_ip = "10.1.40.31"
mgr1_cidr = "10.1.40.31/32"
mgr2_ip = "10.1.40.32"
mgr2_cidr = "10.1.40.32/32"
mgr3_ip = "10.1.40.33"
mgr3_cidr = "10.1.40.33/32"
mgr_count = 3
mgr_flavor = "m1.large"
mgr_data_disk_size = 100
worker_name = "swarm-worker"
worker_count = 5
worker_flavor = "m1.xlarge"
worker_data_disk_size = 100
nfs_server_name = "swarm-nfs-server"
nfs_server_flavor = "m1.medium"
nfs_server_data_disk_name = "Swarm NFS server data Disk"
nfs_server_data_disk_size = 100
nfs_server_data_disk_device = "/dev/vdb"
}
}
variable "swarm_managers_ip" {
type = list(string)
default = ["10.1.40.31", "10.1.40.32", "10.1.40.33"]
}
variable "octavia_swarm_data" {
type = map(string)
default = {
swarm_lb_name = "d4s-production-cloud-swarm-l4"
swarm_lb_description = "L4 balancer that serves the D4Science production Docker Swarm cluster"
swarm_lb_name = "d4s-production-cloud-swarm-l4"
octavia_flavor = "octavia_amphora-mvcpu-ha"
octavia_flavor_id = "394988b5-6603-4a1e-a939-8e177c6681c7"
swarm_lb_hostname = "swarm-lb"
swarm_octavia_main_ip = "10.1.40.30"
swarm_octavia_main_cidr = "10.1.40.30/32"
}
}
variable "swarm_nfs_private_network" {
type = map(string)
default = {
network_name = "swarm-nfs-net"
network_description = "Network used by the swarm nodes and the NFS service"
network_cidr = "192.168.4.0/23"
allocation_pool_start = "192.168.4.100"
allocation_pool_end = "192.168.5.254"
server_ip = "192.168.4.10"
server_cidr = "192.168.4.5/23"
}
}

View File

@ -0,0 +1,72 @@
# Module used
module "ssh_settings" {
  source = "../../modules/ssh-key-ref"
}
# Module used
module "common_variables" {
  source = "../../modules/common_variables"
}
# One data volume per entry of the instances map, keyed like the instances
resource "openstack_blockstorage_volume_v3" "instance_data_volume" {
  for_each = var.instances_with_data_volume_map
  name     = each.value.volume.name
  size     = each.value.volume.size
}
# Generic smartgears_service instance
resource "openstack_compute_instance_v2" "instance_with_data_volume" {
for_each = var.instances_with_data_volume_map
name = each.value.name
availability_zone_hints = module.common_variables.availability_zone_no_gpu_name
flavor_name = each.value.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = each.value.security_groups
block_device {
uuid = each.value.image_ref.uuid
source_type = "image"
volume_size = each.value.image_volume_size
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Creates the networks according to input networks
dynamic "network" {
for_each = each.value.networks
content {
name = network.value
}
}
# Creates the scheduler_hints (i.e. server groups) according to input server_groups_ids
dynamic "scheduler_hints" {
for_each = each.value.server_groups_ids
content {
group = scheduler_hints.value
}
}
# user_data script used
user_data = file("${each.value.image_ref.user_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Attach the additional volume
resource "openstack_compute_volume_attach_v2" "attach_volume" {
for_each = var.instances_with_data_volume_map
instance_id = openstack_compute_instance_v2.instance_with_data_volume[each.key].id
volume_id = openstack_blockstorage_volume_v3.instance_data_volume[each.key].id
device = each.value.volume.device
depends_on = [openstack_compute_instance_v2.instance_with_data_volume]
}

View File

@ -0,0 +1,4 @@
# Echo the instances map so that other configurations can consume it.
output "instances_with_data_volume_map" {
value = var.instances_with_data_volume_map
}

View File

@ -0,0 +1,11 @@
# Define required providers
# NOTE(review): variables.tf of this module uses optional() object type
# attributes, which require Terraform >= 1.3 -- consider raising
# required_version accordingly.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}

View File

@ -0,0 +1,28 @@
# Default smartgears_service entry is EMPTY. Override it to create a proper smartgears plan.
variable "instances_with_data_volume_map" {
type = map(object({
name = string
description = string
flavor = string
networks = list(string)
security_groups = list(string)
server_groups_ids = list(string)
# Expected to carry at least 'uuid' and 'user_data_file' keys -- TODO confirm
image_ref = map(string)
# Size (GB) of the boot volume created from the image.
image_volume_size = optional(number, 10)
# Expected to carry 'name', 'size' and 'device' keys -- TODO confirm
volume = map(string)
}))
default = {
smartgears_service = {
name = "",
description = "",
flavor = "",
networks = [],
security_groups = [],
server_groups_ids = [],
image_ref = {},
volume = {}
}
}
}

View File

@ -0,0 +1,55 @@
# Reference to the shared SSH key pair used for instance access.
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Shared constants: region, availability zones, image ids, flavors, etc.
module "common_variables" {
source = "../../modules/common_variables"
}
# Generic smartgears_service instance (no extra data volume). One instance
# per entry of var.instances_without_data_volume_map, booting from a volume
# built from the referenced image.
resource "openstack_compute_instance_v2" "smartgears_service" {
for_each = var.instances_without_data_volume_map
name = each.value.name
# All these instances are scheduled in the non-GPU availability zone.
availability_zone_hints = module.common_variables.availability_zone_no_gpu_name
flavor_name = each.value.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = each.value.security_groups
# Boot disk: volume created from the image; preserved on instance
# termination (delete_on_termination = false).
block_device {
uuid = each.value.image_ref.uuid
source_type = "image"
volume_size = each.value.image_volume_size
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Creates the networks according to input networks
dynamic "network" {
for_each = each.value.networks
content {
name = network.value
}
}
# Creates the scheduler_hints (i.e. server groups) according to input server_groups_ids
dynamic "scheduler_hints" {
for_each = each.value.server_groups_ids
content {
group = scheduler_hints.value
}
}
# Cloud-init script; assumes image_ref carries a 'user_data_file' key -- TODO confirm
user_data = file("${each.value.image_ref.user_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}

View File

@ -0,0 +1,4 @@
# Echo the instances map so that other configurations can consume it.
output "instances_without_data_volume_map" {
value = var.instances_without_data_volume_map
}

View File

@ -0,0 +1,11 @@
# Define required providers
# NOTE(review): variables.tf of this module uses optional() object type
# attributes, which require Terraform >= 1.3 -- consider raising
# required_version accordingly.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}

View File

@ -0,0 +1,26 @@
# Default instances without data volume is EMPTY. Override it to create a proper instance plan
variable "instances_without_data_volume_map" {
type = map(object({
name = string
description = string
flavor = string
networks = list(string)
security_groups = list(string)
server_groups_ids = list(string)
# Expected to carry at least 'uuid' and 'user_data_file' keys -- TODO confirm
image_ref = map(string)
# Size (GB) of the boot volume created from the image.
image_volume_size = optional(number, 10)
}))
default = {
smartgears_service = {
name = "",
description = "",
flavor = "",
networks = [],
security_groups = [],
server_groups_ids = [],
image_ref = {}
}
}
}

View File

@ -0,0 +1,89 @@
# Re-export the module's variables as outputs so that other configurations
# can consume these shared constants via a simple module reference.
output "main_region" {
value = var.main_region
}
output "external_network" {
value = var.external_network
}
output "external_network_id" {
value = var.external_network.id
}
output "floating_ip_pools" {
value = var.floating_ip_pools
}
output "resolvers_ip" {
value = var.resolvers_ip
}
output "mtu_size" {
value = var.mtu_size
}
output "availability_zones_names" {
value = var.availability_zones_names
}
# Convenience shortcuts for the two availability zones.
output "availability_zone_no_gpu_name" {
value = var.availability_zones_names.availability_zone_no_gpu
}
output "availability_zone_with_gpu_name" {
value = var.availability_zones_names.availability_zone_with_gpu
}
output "ssh_sources" {
value = var.ssh_sources
}
# Base images (name/uuid, and for the Ubuntu ones a cloud-init script path).
output "ubuntu_1804" {
value = var.ubuntu_1804
}
output "ubuntu_2204" {
value = var.ubuntu_2204
}
output "centos_7" {
value = var.centos_7
}
output "almalinux_9" {
value = var.almalinux_9
}
output "ubuntu1804_data_file" {
value = var.ubuntu1804_data_file
}
output "ubuntu2204_data_file" {
value = var.ubuntu2204_data_file
}
output "el7_data_file" {
value = var.el7_data_file
}
output "ssh_jump_proxy" {
value = var.ssh_jump_proxy
}
output "internal_ca_data" {
value = var.internal_ca_data
}
output "policy_list" {
value = var.policy_list
}
output "flavor_list" {
value = var.flavor_list
}
output "default_security_group_name" {
value = var.default_security_group_name
}

View File

@ -0,0 +1,157 @@
# Global definitions shared by every project configuration.
variable "main_region" {
type = string
default = "isti_area_pi_1"
}
# Pre-existing external (public) network; referenced by name and by id.
variable "external_network" {
type = map(string)
default = {
name = "external-network"
id = "1d2ff137-6ff7-4017-be2b-0d6c4af2353b"
}
}
variable "floating_ip_pools" {
type = map(string)
default = {
main_public_ip_pool = "external-network"
}
}
# DNS resolvers pushed to the private subnets.
variable "resolvers_ip" {
type = list(string)
default = ["146.48.29.97", "146.48.29.98", "146.48.29.99"]
}
# MTU for tenant networks; presumably sized for jumbo frames minus
# encapsulation overhead -- TODO confirm with the network setup.
variable "mtu_size" {
type = number
default = 8942
}
variable "availability_zones_names" {
type = map(string)
default = {
availability_zone_no_gpu = "cnr-isti-nova-a"
availability_zone_with_gpu = "cnr-isti-nova-gpu-a"
}
}
# Image references: name, Glance uuid and (for the Ubuntu images) the
# cloud-init user_data script path.
variable "ubuntu_1804" {
type = map(string)
default = {
name = "Ubuntu-Bionic-18.04"
uuid = "7ed6a2cd-2b07-482e-8ce4-f018dff16c89"
user_data_file = "../../openstack_vm_data_scripts/ubuntu1804.sh"
}
}
variable "ubuntu_2204" {
type = map(string)
default = {
name = "Ubuntu-Jammy-22.04"
uuid = "54768889-8556-4be4-a2eb-82a4d9b34627"
user_data_file = "../../openstack_vm_data_scripts/ubuntu2204.sh"
}
}
# NOTE(review): unlike the Ubuntu maps, centos_7 and almalinux_9 carry no
# user_data_file key -- confirm consumers do not expect one.
variable "centos_7" {
type = map(string)
default = {
name = "CentOS-7"
uuid = "f0187a99-64f6-462a-ab5f-ef52fe62f2ca"
}
}
variable "almalinux_9" {
type = map(string)
default = {
name = "AlmaLinux-9.0-20220718"
uuid = "541650fc-dd19-4f38-bb1d-7333ed9dd688"
}
}
# NOTE(review): these duplicate the user_data_file values inside the image
# maps above; keep them in sync.
variable "ubuntu1804_data_file" {
default = "../../openstack_vm_data_scripts/ubuntu1804.sh"
}
variable "ubuntu2204_data_file" {
default = "../../openstack_vm_data_scripts/ubuntu2204.sh"
}
variable "el7_data_file" {
default = "../../openstack_vm_data_scripts/el7.sh"
}
# Name and flavor of the SSH jump host.
variable "ssh_jump_proxy" {
type = map(string)
default = {
name = "ssh-jump-proxy"
flavor = "m2.small"
}
}
# Name and flavor of the internal certification authority host.
variable "internal_ca_data" {
type = map(string)
default = {
name = "ca"
flavor = "m1.small"
}
}
# Symbolic names for the available Nova flavors.
variable "flavor_list" {
type = map(string)
default = {
c1_small = "c1.small" #RAM 2 - VCPUs 2
c1_medium = "c1.medium" #RAM 4 - VCPUs 4
c1_large = "c1.large" #RAM 8 - VCPUs 8
c2_large = "c2.large" #RAM 16 -VCPUs 16
m1_medium = "m1.medium" #RAM 4 - VCPUs 2
m1_large = "m1.large" #RAM 8 - VCPUs 4
m1_xlarge = "m1.xlarge" #RAM 16 - VCPUs 8
m1_xxl = "m1.xxl" #RAM 32 - VCPUS 16
m2_small = "m2.small" #RAM 8 - VCPUs 2
m2_medium = "m2.medium" #RAM 16 - VCPUs 4
m2_large = "m2.large" #RAM 32 - VCPUs 8
m3_large = "m3.large" #RAM 64 - VCPUs 16
}
}
# Server group scheduling policies accepted by Nova.
variable "policy_list" {
type = map(string)
default = {
soft_anti_affinity = "soft-anti-affinity"
anti_affinity = "anti-affinity"
affinity = "affinity"
soft_affinity = "soft-affinity"
}
}
# CIDRs allowed to open SSH sessions towards the instances.
variable "ssh_sources" {
type = map(string)
default = {
s2i2s_vpn_1_cidr = "146.48.28.10/32"
s2i2s_vpn_2_cidr = "146.48.28.11/32"
isti_vpn_gw1 = "146.48.80.101/32"
isti_vpn_gw2 = "146.48.80.102/32"
isti_vpn_gw3 = "146.48.80.103/32"
isti_net_cidr = "146.48.80.0/21"
s2i2s_net_cidr = "146.48.28.0/22"
infrascience_net_cidr = "146.48.122.0/23"
}
}
variable "networks_allocated_to_isti" {
type = map(string)
default = {
"isti_net" = "146.48.80.0/21"
"s2i2s_net" = "146.48.28.0/22"
"infrascience_net" = "146.48.122.0/23"
}
}
# Name of the security group attached to every instance by default.
variable "default_security_group_name" {
default = "default_for_all"
}

View File

@ -0,0 +1,91 @@
# Primary DNS zone owned by the lab project.
resource "openstack_dns_zone_v2" "primary_project_dns_zone" {
name = var.dns_zone.zone_name
email = var.dns_zone.email
description = var.dns_zone.description
project_id = var.os_project_data.id
ttl = var.dns_zone.ttl
type = "PRIMARY"
}
# Main private network of the project; instance DNS records are created
# under the project zone via dns_domain.
resource "openstack_networking_network_v2" "main-private-network" {
name = var.main_private_network.name
admin_state_up = "true"
external = "false"
description = var.main_private_network.description
dns_domain = var.dns_zone.zone_name
mtu = module.common_variables.mtu_size
port_security_enabled = true
shared = false
region = module.common_variables.main_region
tenant_id = var.os_project_data.id
}
# Subnet of the main private network, with DHCP enabled and the shared
# resolvers pushed to the instances.
resource "openstack_networking_subnet_v2" "main-private-subnet" {
name = var.main_private_subnet.name
description = var.main_private_subnet.description
network_id = openstack_networking_network_v2.main-private-network.id
cidr = var.main_private_subnet.cidr
gateway_ip = var.main_private_subnet.gateway_ip
dns_nameservers = module.common_variables.resolvers_ip
ip_version = 4
enable_dhcp = true
tenant_id = var.os_project_data.id
allocation_pool {
start = var.main_private_subnet.allocation_start
end = var.main_private_subnet.allocation_end
}
}
# Router that connects the project private network to the external network.
resource "openstack_networking_router_v2" "external-router" {
  # The router name comes from this module's own external_router variable:
  # the common_variables module exposes no 'external_router' output, so the
  # previous module.common_variables.external_router.name reference could
  # never resolve (description below already used var.external_router).
  name                = var.external_router.name
  description         = var.external_router.description
  external_network_id = module.common_variables.external_network.id
  tenant_id           = var.os_project_data.id
  # SNAT so that instances on the private subnet can reach the outside.
  enable_snat         = true
  vendor_options {
    set_router_gateway_after_create = true
  }
}
# Router interface configuration: plug the main private subnet into the
# external router so its instances can be routed.
resource "openstack_networking_router_interface_v2" "private-network-routing" {
router_id = openstack_networking_router_v2.external-router.id
# router_id = var.external_router.id
subnet_id = openstack_networking_subnet_v2.main-private-subnet.id
}
#
# This is the security group that should be added to every instance
# NOTE(review): var.default_security_group_name is not declared in this
# module's variables.tf as shown -- confirm it is defined elsewhere.
resource "openstack_networking_secgroup_v2" "default" {
name = var.default_security_group_name
delete_default_rules = "true"
description = "Default security group with rules that allow ssh access from the ISTI networks, http, https"
}
# Allow all outbound IPv4 traffic (no remote prefix/ports: matches everything).
resource "openstack_networking_secgroup_rule_v2" "egress-ipv4" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Allow all the egress traffic"
direction = "egress"
ethertype = "IPv4"
}
# Allow inbound ICMP from anywhere (ping, path MTU discovery).
resource "openstack_networking_secgroup_rule_v2" "ingress-icmp" {
security_group_id = openstack_networking_secgroup_v2.default.id
description = "Allow ICMP from remote"
direction = "ingress"
ethertype = "IPv4"
remote_ip_prefix = "0.0.0.0/0"
protocol = "icmp"
}
# Configure the default security group: one ingress TCP rule per entry of
# var.default_firewall_rules_map (description, source CIDR, port range).
resource "openstack_networking_secgroup_rule_v2" "default_firewall_rules" {
for_each = var.default_firewall_rules_map
security_group_id = openstack_networking_secgroup_v2.default.id
description = each.value.description
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = each.value.port_min
port_range_max = each.value.port_max
remote_ip_prefix = each.value.source
}

View File

@ -0,0 +1,15 @@
# Identifiers exported for the per-project configurations that consume
# this state via terraform_remote_state.
output "dns_zone_id" {
value = openstack_dns_zone_v2.primary_project_dns_zone.id
}
output "main_private_network_id" {
value = openstack_networking_network_v2.main-private-network.id
}
output "main_subnet_network_id" {
value = openstack_networking_subnet_v2.main-private-subnet.id
}
# NOTE(review): external_fixed_ip is the router's gateway block; confirm the
# attribute name against the provider version in use.
output "external_gateway_ip" {
value = openstack_networking_router_v2.external-router.external_fixed_ip
}

View File

@ -0,0 +1,21 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Reference to the shared SSH key pair used for instance access.
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Shared constants: region, availability zones, image ids, flavors, etc.
module "common_variables" {
source = "../../modules/common_variables"
}

View File

@ -0,0 +1,62 @@
# OpenStack project (tenant) that owns every resource of this module.
variable "os_project_data" {
type = map(string)
default = {
id = ""
}
}
# Primary DNS zone data. NOTE(review): ttl 8600 looks unusual -- possibly
# meant 3600 or 86400; confirm. Values are coerced to string by map(string).
variable "dns_zone" {
type = map(string)
default = {
zone_name = ""
email = "postmaster@isti.cnr.it"
description = ""
ttl = 8600
id = ""
}
}
variable "main_private_network" {
type = map(string)
default = {
name = ""
description = ""
}
}
variable "main_private_subnet" {
type = map(string)
default = {
name = ""
description = ""
cidr = ""
gateway_ip = ""
allocation_start = ""
allocation_end = ""
}
}
variable "external_router" {
type = map(string)
default = {
name = ""
description = ""
}
}
# Ingress TCP rules applied to the default security group.
# Keys are rule names; each entry gives description, source CIDR and port range.
variable "default_firewall_rules_map" {
  type = map(object({
    description = string
    source      = string
    port_min    = number
    port_max    = number
  }))
  # Variable defaults must be literal values: the previous default referenced
  # module.labs_common_variables..., which is not allowed in a variable
  # default and fails validation. The CIDR below is the ISTI network
  # (ssh_sources.isti_net_cidr in the common variables module).
  default = {
    "ssh_from_isti_net" = {
      description = "SSH from the ISTI network"
      source      = "146.48.80.0/21"
      port_min    = 22
      port_max    = 22
    }
  }
}

View File

@ -0,0 +1,11 @@
# Echo the Liferay input variables so other states can consume them.
output "liferay_data" {
value = var.liferay_data
}
output "liferay_ip_addrs" {
value = var.liferay_ip_addrs
}
output "liferay_recordsets" {
value = var.liferay_recordsets
}

View File

@ -0,0 +1,28 @@
# Liferay cluster sizing and naming. Numeric values are coerced to string by
# map(string) and converted back where used (e.g. vm_count as count).
variable "liferay_data" {
type = map(string)
default = {
affinity_policy = "soft-anti-affinity"
srv_name = "lr62"
vm_count = 1
vm_flavor = "m1.large"
boot_vol_size = 30
}
}
# Fixed private IPs of the Liferay nodes; expected to hold at least
# vm_count entries -- TODO confirm against the tfvars.
variable "liferay_ip_addrs" {
type = list(string)
default = []
}
# CNAME recordsets created in the project DNS zone.
variable "liferay_recordsets" {
type = map(object({
name = string
description = string
}))
default = {
liferay_dns_record = {
name = "",
description = ""
}
}
}

View File

@ -0,0 +1,89 @@
#
# Liferay nodes
#
#
# Security group
#
# Group for the intra-cluster traffic; default rules are dropped so only
# the explicit rules below apply.
resource "openstack_networking_secgroup_v2" "liferay_cluster_traffic" {
name = "liferay_cluster_traffic"
delete_default_rules = "true"
description = "Traffic between the Liferay cluster nodes"
}
# One ingress rule per cluster node, allowing TCP from that node's /32.
# No port range is given, so the rule covers all TCP ports between nodes.
resource "openstack_networking_secgroup_rule_v2" "traffic_between_liferay_nodes" {
  count             = var.liferay_data.vm_count
  security_group_id = openstack_networking_secgroup_v2.liferay_cluster_traffic.id
  description       = "Traffic between liferay nodes"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  # element() indexes the list directly; the previous legacy splat
  # (var.liferay_ip_addrs.*) was redundant.
  remote_ip_prefix  = join("/", [element(var.liferay_ip_addrs, count.index), "32"])
}
#
# Object storage container
#
# Creating object bucket to store avatars
resource "openstack_objectstorage_container_v1" "liferay" {
name = "liferay-data"
versioning = true
}
#
# Server group
#
# Scheduling group for the cluster nodes, with the policy taken from
# liferay_data (soft-anti-affinity by default).
resource "openstack_compute_servergroup_v2" "liferay" {
name = "liferay"
policies = [var.liferay_data.affinity_policy]
}
# Instance(s): vm_count Liferay nodes named <srv_name>-01, -02, ...
# NOTE(review): several remote-state outputs are referenced here
# (security_group_list, main_private_network, shared_postgresql_server_data)
# that are not visible in this repository chunk -- confirm the project-setup
# state actually exports them.
resource "openstack_compute_instance_v2" "liferay" {
count = var.liferay_data.vm_count
name = format("%s-%02d", var.liferay_data.srv_name, count.index + 1)
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
flavor_name = var.liferay_data.vm_flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [data.terraform_remote_state.privnet_dns_router.outputs.default_security_group_name, openstack_networking_secgroup_v2.liferay_cluster_traffic.name, data.terraform_remote_state.privnet_dns_router.outputs.security_group_list.http_and_https_from_the_load_balancers, "restricted_web_service"]
scheduler_hints {
group = openstack_compute_servergroup_v2.liferay.id
}
# Boot volume built from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = data.terraform_remote_state.privnet_dns_router.outputs.ubuntu_1804.uuid
source_type = "image"
volume_size = var.liferay_data.boot_vol_size
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Main private network with a fixed IP per node.
network {
name = data.terraform_remote_state.privnet_dns_router.outputs.main_private_network.name
fixed_ip_v4 = var.liferay_ip_addrs.* [count.index]
}
# Second NIC on the shared PostgreSQL service network.
network {
name = module.common_variables.shared_postgresql_server_data.network_name
}
user_data = file("${data.terraform_remote_state.privnet_dns_router.outputs.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# CNAME records pointing the Liferay hostnames at the main load balancer.
resource "openstack_dns_recordset_v2" "cdn_dns_recordset" {
for_each = var.liferay_recordsets
zone_id = data.terraform_remote_state.privnet_dns_router.outputs.dns_zone_id
name = each.value.name
description = each.value.description
ttl = 8600
type = "CNAME"
records = [local.cname_target]
}
# CNAME target: the zone_name already ends with a trailing dot, so the
# resulting FQDN is absolute.
locals {
cname_target = "main-lb.${data.terraform_remote_state.privnet_dns_router.outputs.dns_zone.zone_name}"
}

View File

@ -0,0 +1,29 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# State of the project-setup configuration (networks, DNS zone, default
# security group), consumed read-only via its local state file.
data "terraform_remote_state" "privnet_dns_router" {
backend = "local"
config = {
path = "../project-setup/terraform.tfstate"
}
}
# SSH settings
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Global variables (constants, really)
module "common_variables" {
source = "../../modules/common_variables"
}

201
modules/mongodb/mongodb.tf Normal file
View File

@ -0,0 +1,201 @@
#
# Server groups for both the masters and the workers
#
# Hard anti-affinity: the replica set members must land on distinct hosts.
resource "openstack_compute_servergroup_v2" "mongodb" {
name = "mongodb"
policies = ["anti-affinity"]
}
#
# Security groups
#
# Rules
# 80 from 0/0
# 9101 from prometheus
# 27017 from: garr-ct1, garr-na, garr-pa1, InfraScience, S2I2S
resource "openstack_networking_secgroup_v2" "mongodb_cluster_traffic" {
name = "mongodb_cluster_traffic"
delete_default_rules = "true"
description = "Traffic between the MongoDB nodes"
}
# 27017 from the project's private subnet.
# NOTE(review): var.main_private_subnet, var.networks_with_d4s_services and
# var.basic_services_ip are not declared in the variables file shown here --
# confirm they are defined elsewhere in this module.
resource "openstack_networking_secgroup_rule_v2" "access_to_the_mongodb_service_from_the_internal_network" {
security_group_id = openstack_networking_secgroup_v2.mongodb_cluster_traffic.id
description = "Access to the MongoDB service"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 27017
port_range_max = 27017
remote_ip_prefix = var.main_private_subnet.cidr
}
# 27017 from the external networks that host D4Science services.
resource "openstack_networking_secgroup_rule_v2" "access_to_the_mongodb_service_from_the_outside" {
for_each = toset([var.networks_with_d4s_services.infrascience_net, var.networks_with_d4s_services.s2i2s_net, var.networks_with_d4s_services.garr_ct1_net, var.networks_with_d4s_services.garr_pa1_net, var.networks_with_d4s_services.garr_na_net])
security_group_id = openstack_networking_secgroup_v2.mongodb_cluster_traffic.id
description = "Access to the MongoDB service"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 27017
port_range_max = 27017
remote_ip_prefix = each.value
}
# Port 80 open to the world for the Let's Encrypt HTTP-01 challenge.
resource "openstack_networking_secgroup_rule_v2" "mongodb_plain_http_for_letsencrypt" {
security_group_id = openstack_networking_secgroup_v2.mongodb_cluster_traffic.id
description = "Plain HTTP for letsencrypt"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "0.0.0.0/0"
}
# Metrics endpoint reachable only from the Prometheus server.
resource "openstack_networking_secgroup_rule_v2" "mongodb_prometheus_exporter" {
security_group_id = openstack_networking_secgroup_v2.mongodb_cluster_traffic.id
description = "Prometheus exporter for MongoDB"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 9101
port_range_max = 9101
remote_ip_prefix = var.basic_services_ip.prometheus_cidr
}
#
# Mongodb cluster VMs
#
# Instance
# Names start at <name>-02 (count.index + 2) -- presumably an -01 node
# exists outside this configuration; TODO confirm.
resource "openstack_compute_instance_v2" "mongodb_cluster_nodes" {
count = var.mongodb_cluster_data.count
name = format("%s-%02d", var.mongodb_cluster_data.name, count.index + 2)
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.mongodb_cluster_data.flavor
key_pair = var.ssh_key_file.name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.mongodb_cluster_traffic.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.mongodb.id
}
# Boot volume built from the configured image; kept on termination.
block_device {
uuid = var.mongodb_cluster_data.image_type_uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Blank data disk for the MongoDB data files.
block_device {
source_type = "blank"
volume_size = var.mongodb_cluster_data.data_disk_size
boot_index = -1
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.mongodb_ip.* [count.index]
}
user_data = file("${var.ubuntu2204_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Allocate a floating IP per cluster node.
resource "openstack_networking_floatingip_v2" "mongodb_cluster_floating_ip" {
count = var.mongodb_cluster_data.count
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = format("MongoDB cluster node %s-%02d", var.mongodb_cluster_data.name, count.index + 2)
}
# Bind each floating IP to the matching instance.
resource "openstack_compute_floatingip_associate_v2" "mongodb_cluster_ip" {
count = var.mongodb_cluster_data.count
floating_ip = element(openstack_networking_floatingip_v2.mongodb_cluster_floating_ip.*.address, count.index)
instance_id = element(openstack_compute_instance_v2.mongodb_cluster_nodes.*.id, count.index)
depends_on = [openstack_networking_floatingip_v2.mongodb_cluster_floating_ip]
}
# One public A record per node: <instance name>.<zone>.
resource "openstack_dns_recordset_v2" "mongodb_cluster_dns_recordsets" {
count = var.mongodb_cluster_data.count
zone_id = var.dns_zone_id
name = join(".", [element(openstack_compute_instance_v2.mongodb_cluster_nodes.*.name, count.index), var.dns_zone.zone_name])
description = "Mongodb public hostnames"
ttl = 8600
type = "A"
records = [element(openstack_networking_floatingip_v2.mongodb_cluster_floating_ip.*.address, count.index)]
depends_on = [openstack_networking_floatingip_v2.mongodb_cluster_floating_ip]
}
#
# MongoDB vol node
#
# Instance
# Single standalone "volatile" MongoDB node, outside the replica set's
# server group.
resource "openstack_compute_instance_v2" "mongodb_vol_node" {
name = "mongodb-vol"
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.mongodb_vol_data.flavor
key_pair = var.ssh_key_file.name
security_groups = [var.default_security_group_name, openstack_networking_secgroup_v2.mongodb_cluster_traffic.name]
# Boot volume built from the configured image; kept on termination.
block_device {
uuid = var.mongodb_vol_data.image_type_uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Blank data disk for the MongoDB data files.
block_device {
source_type = "blank"
volume_size = var.mongodb_vol_data.data_disk_size
boot_index = -1
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
fixed_ip_v4 = var.mongodb_vol_ip
}
user_data = file("${var.ubuntu2204_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Allocate a floating IP for the volatile node.
resource "openstack_networking_floatingip_v2" "mongodb_vol_floating_ip" {
pool = var.floating_ip_pools.main_public_ip_pool
# The DNS association does not work because of a bug in the OpenStack API
# dns_name = "main-lb"
# dns_domain = var.dns_zone.zone_name
description = "MongoDB Volatile"
}
# Bind the floating IP to the volatile node.
resource "openstack_compute_floatingip_associate_v2" "mongodb_vol_public_ip" {
floating_ip = openstack_networking_floatingip_v2.mongodb_vol_floating_ip.address
instance_id = openstack_compute_instance_v2.mongodb_vol_node.id
depends_on = [openstack_networking_floatingip_v2.mongodb_vol_floating_ip]
}
# Public A record for the volatile node: <instance name>.<zone>.
resource "openstack_dns_recordset_v2" "mongodb_vol_dns_recordsets" {
  zone_id     = var.dns_zone_id
  # join() takes a separator and a single list: the previous call passed two
  # separate lists (join(".", [node.name], [zone_name])), which is an invalid
  # join() invocation; both parts now live in one list, matching the cluster
  # recordset above.
  name        = join(".", [openstack_compute_instance_v2.mongodb_vol_node.name, var.dns_zone.zone_name])
  description = "Mongodb Volatile public hostnames"
  ttl         = 8600
  type        = "A"
  records     = [openstack_networking_floatingip_v2.mongodb_vol_floating_ip.address]
  depends_on  = [openstack_networking_floatingip_v2.mongodb_vol_floating_ip]
}

View File

@ -0,0 +1,32 @@
# Replica set sizing; numeric values are coerced to string by map(string)
# and converted back where used (e.g. count).
variable "mongodb_cluster_data" {
type = map(string)
default = {
count = 3
name = "mongodb-replica"
flavor = "m1.small"
data_disk_size = 100
image_type_name = "Ubuntu-Focal-20.04"
image_type_uuid = "75c23040-2be7-49e9-8029-a16dc9f755d1"
}
}
# Fixed private IPs of the replica set members; expected to hold at least
# 'count' entries -- TODO confirm against the tfvars.
variable "mongodb_ip" {
type = list(string)
default = []
}
# Standalone "volatile" node settings.
variable "mongodb_vol_data" {
type = map(string)
default = {
name = "mongodb-vol"
flavor = "m1.small"
data_disk_size = 100
image_type_name = "Ubuntu-Focal-20.04"
image_type_uuid = "75c23040-2be7-49e9-8029-a16dc9f755d1"
}
}
# Fixed private IP of the volatile node.
variable "mongodb_vol_ip" {
default = ""
}

View File

@ -0,0 +1,244 @@
# OrientDB and OrientDB for the smart executors
#
# Scheduling group for the cluster nodes; policy comes from a variable
# (soft-anti-affinity by default).
resource "openstack_compute_servergroup_v2" "orientdb_cluster" {
name = "orientdb_cluster"
policies = [var.orientdb_affinity_policy]
}
#
# Network for the cluster traffic
#
resource "openstack_networking_network_v2" "orientdb_network" {
name = var.orientdb_net.network_name
admin_state_up = "true"
external = "false"
description = var.orientdb_net.network_description
mtu = module.common_variables.mtu_size
port_security_enabled = true
shared = false
region = module.common_variables.main_region
}
# Subnet
# Isolated subnet (no_gateway): reachable only by instances attached to it.
resource "openstack_networking_subnet_v2" "orientdb_subnet" {
name = "orientdb-subnet"
description = "Subnet used by the OrientDB service"
network_id = openstack_networking_network_v2.orientdb_network.id
cidr = var.orientdb_net.network_cidr
dns_nameservers = module.common_variables.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = var.orientdb_net.allocation_pool_start
end = var.orientdb_net.allocation_pool_end
}
}
#
# Network for the OrientDB SE
#
# Separate network dedicated to the OrientDB for Smart Executor service.
resource "openstack_networking_network_v2" "orientdb_se_network" {
name = var.orientdb_se_net.network_name
admin_state_up = "true"
external = "false"
description = var.orientdb_se_net.network_description
mtu = module.common_variables.mtu_size
port_security_enabled = true
shared = false
region = module.common_variables.main_region
}
# Subnet
# Isolated subnet (no_gateway): reachable only by instances attached to it.
resource "openstack_networking_subnet_v2" "orientdb_se_subnet" {
name = "orientdb-se-subnet"
description = "Subnet used by the OrientDB for Smart Executor"
network_id = openstack_networking_network_v2.orientdb_se_network.id
cidr = var.orientdb_se_net.network_cidr
dns_nameservers = module.common_variables.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
allocation_pool {
start = var.orientdb_se_net.allocation_pool_start
end = var.orientdb_se_net.allocation_pool_end
}
}
#
# Security groups
#
# Main OrientDB service
# Between OrientDB nodes
# Default rules are dropped so only the explicit rules below apply.
resource "openstack_networking_secgroup_v2" "orientdb_internal_traffic" {
name = "orientdb_internal_traffic"
delete_default_rules = "true"
description = "Traffic between the OrientDB nodes"
}
# One ingress rule per cluster node, allowing the OrientDB binary/HTTP port
# range (2424-2490) from that node's /32 CIDR.
resource "openstack_networking_secgroup_rule_v2" "orientdb_ports" {
  count             = var.orientdb_nodes_count
  security_group_id = openstack_networking_secgroup_v2.orientdb_internal_traffic.id
  description       = "TCP traffic between OrientDB nodes"
  port_range_min    = 2424
  port_range_max    = 2490
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  # Direct list index; the previous form used a redundant legacy splat
  # (var.orientdb_cidr.* [count.index]) that evaluates to the same value.
  remote_ip_prefix  = var.orientdb_cidr[count.index]
}
# Access from the clients
resource "openstack_networking_secgroup_v2" "access_to_orientdb" {
name = "access_to_orientdb"
delete_default_rules = "true"
description = "Clients that talk to the OrientDB service"
}
# Full OrientDB port range from the SSH jump host and the service subnet.
resource "openstack_networking_secgroup_rule_v2" "access_to_orient_from_clients" {
for_each = toset([data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.ssh_jump_cidr, openstack_networking_subnet_v2.orientdb_subnet.cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb.id
description = "TCP traffic from the resource registries and the SSH jump server"
port_range_min = 2424
port_range_max = 2490
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = each.value
}
# HTTP port (2480) only, from the two L7 load balancers.
resource "openstack_networking_secgroup_rule_v2" "access_to_orient_from_haproxy" {
for_each = toset([data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.haproxy_l7_1_cidr, data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.haproxy_l7_2_cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb.id
description = "TCP traffic from the load balancers"
port_range_min = 2480
port_range_max = 2480
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = each.value
}
# OrientDB for the Smart Executor nodes
# Access from the clients
resource "openstack_networking_secgroup_v2" "access_to_orientdb_se" {
name = "access_to_orientdb_se"
delete_default_rules = "true"
description = "Clients that talk to the OrientDB SE service"
}
# Full OrientDB port range from the SSH jump host and the SE subnet.
resource "openstack_networking_secgroup_rule_v2" "access_to_orient_se_from_clients" {
for_each = toset([data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.ssh_jump_cidr, openstack_networking_subnet_v2.orientdb_se_subnet.cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb_se.id
description = "TCP traffic from the smart executors and the SSH jump server"
port_range_min = 2424
port_range_max = 2490
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = each.value
}
# HTTP port (2480) only, from the two L7 load balancers.
resource "openstack_networking_secgroup_rule_v2" "access_to_orient_se_from_haproxy" {
for_each = toset([data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.haproxy_l7_1_cidr, data.terraform_remote_state.privnet_dns_router.outputs.basic_services_ip.haproxy_l7_2_cidr])
security_group_id = openstack_networking_secgroup_v2.access_to_orientdb_se.id
description = "TCP traffic from the load balancers"
port_range_min = 2480
port_range_max = 2480
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
remote_ip_prefix = each.value
}
#
# OrientDB main cluster
#
# Instances used by the resource registry
# NOTE(review): both image_name and a boot-from-volume block_device image
# uuid are specified; confirm image_name is not redundant here.
resource "openstack_compute_instance_v2" "orientdb_servers" {
count = var.orientdb_nodes_count
name = format("%s-%02d", var.orientdb_data.node_name, count.index + 1)
availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
image_name = var.orientdb_image_name
flavor_name = var.orientdb_node_flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [data.terraform_remote_state.privnet_dns_router.outputs.default_security_group_name, openstack_networking_secgroup_v2.orientdb_internal_traffic.name, openstack_networking_secgroup_v2.access_to_orientdb.name]
scheduler_hints {
group = openstack_compute_servergroup_v2.orientdb_cluster.id
}
# Boot volume built from the cluster image; kept on termination.
block_device {
uuid = var.orientdb_image_uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
# Blank data disk for the OrientDB databases.
block_device {
source_type = "blank"
volume_size = var.orientdb_data.node_data_disk_size
boot_index = -1
destination_type = "volume"
delete_on_termination = false
}
network {
name = data.terraform_remote_state.privnet_dns_router.outputs.main_private_network.name
}
# Second NIC on the dedicated OrientDB network, with a fixed IP per node.
network {
name = var.orientdb_net.network_name
fixed_ip_v4 = var.orientdb_ip.* [count.index]
}
user_data = file("${module.common_variables.ubuntu_2204.user_data_file}")
depends_on = [openstack_networking_subnet_v2.orientdb_subnet]
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Instance used by the smart executors.
# Single node on both the main private network and the dedicated SE network,
# with an extra blank data disk.
resource "openstack_compute_instance_v2" "orientdb_se_server" {
  name                    = "orientdb-se"
  availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
  image_name              = var.orientdb_se_image_name
  flavor_name             = var.orientdb_se_node_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [data.terraform_remote_state.privnet_dns_router.outputs.default_security_group_name, openstack_networking_secgroup_v2.access_to_orientdb_se.name]
  block_device {
    # Fixed: this previously used var.orientdb_image_uuid (the main cluster
    # image) while image_name above uses the SE variant; the declared
    # var.orientdb_se_image_uuid was never used anywhere.
    uuid                  = var.orientdb_se_image_uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }
  # Blank data disk. NOTE(review): sized from orientdb_data (the cluster
  # settings) rather than an SE-specific variable -- confirm this is intended.
  block_device {
    source_type           = "blank"
    volume_size           = var.orientdb_data.node_data_disk_size
    boot_index            = -1
    destination_type      = "volume"
    delete_on_termination = false
  }
  network {
    name = data.terraform_remote_state.privnet_dns_router.outputs.main_private_network.name
  }
  network {
    name        = var.orientdb_se_net.network_name
    fixed_ip_v4 = var.orientdb_se_ip
  }
  user_data  = file("${module.common_variables.ubuntu_2204.user_data_file}")
  depends_on = [openstack_networking_subnet_v2.orientdb_se_subnet]
  # Do not replace the instance when the ssh key changes
  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      key_pair, user_data, network
    ]
  }
}

View File

@ -0,0 +1,29 @@
# Define required providers
# Pins the OpenStack provider series; Terraform >= 0.14 is required by
# every stack in this repository.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Networking/DNS outputs exported by the project-setup stage of this project.
# NOTE(review): assumes the project-setup state file sits next to this stack
# on the local filesystem — confirm before running from CI or another host.
data "terraform_remote_state" "privnet_dns_router" {
backend = "local"
config = {
path = "../project-setup/terraform.tfstate"
}
}
# SSH settings
# Provides ssh_key_name / ssh_key_file consumed by the compute instances.
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Global variables (constants, really)
module "common_variables" {
source = "../../modules/common_variables"
}

View File

@ -0,0 +1,85 @@
# OrientDB cluster: node base name and data-disk layout.
variable "orientdb_data" {
  type        = map(string)
  description = "Base host name and data-disk size/device of the OrientDB cluster nodes"
  default = {
    node_name             = "orientdb"
    node_data_disk_size   = 10
    node_data_disk_device = "/dev/vdb"
  }
}

variable "orientdb_affinity_policy" {
  description = "Server-group policy used to spread the OrientDB cluster nodes"
  default     = "soft-anti-affinity"
}

variable "orientdb_node_flavor" {
  description = "Flavor of the OrientDB cluster nodes (set in the tfvars file)"
  default     = ""
}

variable "orientdb_nodes_count" {
  description = "Number of OrientDB cluster nodes"
  # FIX: the default was "" (empty string), but this value is used as a
  # resource count, which must be a number. 0 also matches the convention
  # used by timescaledb_nodes_count elsewhere in this repository.
  default     = 0
}

variable "orientdb_image_name" {
  description = "Glance image name used by the OrientDB cluster nodes"
  default     = ""
}

variable "orientdb_se_image_name" {
  description = "Glance image name used by the OrientDB SmartExecutor node"
  default     = ""
}

variable "orientdb_image_uuid" {
  description = "Glance image UUID used by the OrientDB cluster nodes"
  default     = ""
}

variable "orientdb_se_image_uuid" {
  description = "Glance image UUID used by the OrientDB SmartExecutor node"
  default     = ""
}

variable "orientdb_ip" {
  type        = list(string)
  description = "Fixed IPs of the OrientDB cluster nodes on the orientdb-net network"
  default     = ["192.168.10.5", "192.168.10.6", "192.168.10.7"]
}

variable "orientdb_cidr" {
  type        = list(string)
  description = "Host CIDRs of the OrientDB cluster nodes (for firewall rules)"
  default     = ["192.168.10.5/32", "192.168.10.6/32", "192.168.10.7/32"]
}

variable "orientdb_se_node_flavor" {
  description = "Flavor of the OrientDB SmartExecutor node (set in the tfvars file)"
  default     = ""
}

variable "orientdb_se_ip" {
  description = "Fixed IP of the OrientDB SmartExecutor node"
  default     = ""
}

variable "orientdb_se_cidr" {
  description = "Host CIDR of the OrientDB SmartExecutor node"
  default     = ""
}

variable "orientdb_net" {
  type        = map(string)
  description = "Definition of the dedicated network of the OrientDB cluster"
  default = {
    network_name          = "orientdb-net"
    network_description   = "Network used by the OrientDB cluster and to access the service"
    network_cidr          = "192.168.10.0/24"
    allocation_pool_start = "192.168.10.11"
    allocation_pool_end   = "192.168.10.254"
  }
}

variable "orientdb_se_net" {
  type        = map(string)
  description = "Definition of the dedicated network of the OrientDB SmartExecutor"
  default = {
    network_name          = "orientdb-se-net"
    network_description   = "Network used by the OrientDB for Smart Executor"
    network_cidr          = "192.168.12.0/24"
    allocation_pool_start = "192.168.12.11"
    allocation_pool_end   = "192.168.12.254"
  }
}

variable "orientdb_se_secgroup" {
  description = "Name of the security group that grants access to the OrientDB SmartExecutor"
  default     = "access_to_orientdb_se"
}

variable "postgresql_secgroup" {
  description = "Name of the shared PostgreSQL service security group"
  default     = "PostgreSQL service"
}

View File

@ -0,0 +1,165 @@
# Accounting dashboard harvester
# VM hosting the accounting dashboard harvester SmartExecutor plugin.
# Boots Ubuntu 18.04 from a new 10 GB volume and attaches to the main
# private network plus the orientdb-se, shared-postgresql and timescaledb
# service networks.
resource "openstack_compute_instance_v2" "accounting_dashboard_harvester" {
name = var.accounting_dashboard_harvester.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.accounting_dashboard_harvester.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
# Boot volume created from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = var.ubuntu_1804.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.networks_list.orientdb_se
}
network {
name = var.networks_list.shared_postgresql
}
network {
name = var.networks_list.timescaledb
}
# Cloud-init user data shared by all Ubuntu 18.04 instances.
user_data = file("${var.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Resource checker
# VM hosting the resource checker SmartExecutor plugin. Boots Ubuntu 18.04
# from a new 10 GB volume; needs the main private network and the
# orientdb-se service network.
resource "openstack_compute_instance_v2" "resource_checker" {
name = var.resource_checker.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.resource_checker.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
# Boot volume created from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = var.ubuntu_1804.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.networks_list.orientdb_se
}
user_data = file("${var.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Social data indexer
# VM hosting the social data indexer SmartExecutor plugin. Boots Ubuntu
# 18.04 from a new 10 GB volume; needs the main private network and the
# orientdb-se service network.
resource "openstack_compute_instance_v2" "social_data_indexer" {
name = var.social_data_indexer.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.social_data_indexer.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
# Boot volume created from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = var.ubuntu_1804.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.networks_list.orientdb_se
}
user_data = file("${var.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Accounting insert storage
# VM hosting the accounting insert storage SmartExecutor plugin. Boots
# Ubuntu 18.04 from a new 10 GB volume; needs the main private network and
# the orientdb-se service network.
resource "openstack_compute_instance_v2" "accounting_insert_storage" {
name = var.accounting_insert_storage.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.accounting_insert_storage.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
# Boot volume created from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = var.ubuntu_1804.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.networks_list.orientdb_se
}
user_data = file("${var.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}
# Accounting aggregator
# VM hosting the accounting aggregator SmartExecutor plugin. Boots Ubuntu
# 18.04 from a new 10 GB volume; needs the main private network and the
# orientdb-se service network.
resource "openstack_compute_instance_v2" "accounting_aggregator" {
name = var.accounting_aggregator.name
availability_zone_hints = var.availability_zones_names.availability_zone_no_gpu
flavor_name = var.accounting_aggregator.flavor
key_pair = module.ssh_settings.ssh_key_name
security_groups = [var.default_security_group_name]
# Boot volume created from the Ubuntu 18.04 image; kept on termination.
block_device {
uuid = var.ubuntu_1804.uuid
source_type = "image"
volume_size = 10
boot_index = 0
destination_type = "volume"
delete_on_termination = false
}
network {
name = var.main_private_network.name
}
network {
name = var.networks_list.orientdb_se
}
user_data = file("${var.ubuntu1804_data_file}")
# Do not replace the instance when the ssh key changes
lifecycle {
ignore_changes = [
# Ignore changes to tags, e.g. because a management agent
# updates these based on some ruleset managed elsewhere.
key_pair, user_data, network
]
}
}

View File

@ -0,0 +1,44 @@
# Per-plugin instance settings (VM name, human description, flavor).
variable "accounting_dashboard_harvester" {
type = map(string)
default = {
name = "accounting-dashboard-harvester-se-plugin"
description = "Accounting Dashboard Harvester SE plugin"
flavor = "m1.medium"
}
}
variable "resource_checker" {
type = map(string)
default = {
name = "resource-checker-se-plugin"
description = "Resource checker SE plugin"
flavor = "c1.small"
}
}
variable "social_data_indexer" {
type = map(string)
default = {
name = "social-data-indexer-se-plugin"
description = "Social data indexer SE plugin"
flavor = "c1.small"
}
}
variable "accounting_insert_storage" {
type = map(string)
default = {
name = "accounting-insert-storage-se-plugin"
description = "Accounting insert storage SE plugin"
flavor = "c1.small"
}
}
variable "accounting_aggregator" {
type = map(string)
default = {
name = "accounting-aggregator-se-plugin"
description = "Accounting aggregator SE plugin"
flavor = "m1.medium"
}
}

View File

@ -0,0 +1,10 @@
# Operator SSH key used to create and access the instances.
# NOTE(review): this hard-codes a personal key ("adellam",
# ~/.ssh/id_ed25519); the ssh-key-ref template variant of this file is
# the intended way to supply a per-user key.
output "ssh_key_file" {
value = "~/.ssh/id_ed25519"
sensitive = true
}
output "ssh_key_name" {
value = "adellam"
sensitive = false
}

View File

@ -0,0 +1,12 @@
# Template for the per-user SSH key reference.
# This file must be renamed to 'ssh-key-ref-outputs.tf' before use.
# Replace the placeholders {YOUR_PRIVATE_KEYNAME} (without the .pub
# extension) and {YOUR_KEYNAME} with the proper values.
output "ssh_key_file" {
value = "~/.ssh/{YOUR_PRIVATE_KEYNAME}"
sensitive = true
}
output "ssh_key_name" {
value = "{YOUR_KEYNAME}"
sensitive = false
}

View File

@ -0,0 +1,19 @@
# Define required providers
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Key pair reference shared by all the stacks of this project.
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Upload the operator's public key (read from <ssh_key_file>.pub) to
# OpenStack so instances can be created with it.
resource "openstack_compute_keypair_v2" "initial_ssh_key" {
name = module.ssh_settings.ssh_key_name
public_key = file("${module.ssh_settings.ssh_key_file}.pub")
}

View File

@ -0,0 +1,29 @@
# Define required providers
# Pins the OpenStack provider series; Terraform >= 0.14 is required by
# every stack in this repository.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Networking/DNS outputs exported by the project-setup stage of this project.
# NOTE(review): assumes the project-setup state file sits next to this stack
# on the local filesystem — confirm before running from CI or another host.
data "terraform_remote_state" "privnet_dns_router" {
backend = "local"
config = {
path = "../project-setup/terraform.tfstate"
}
}
# SSH settings
# Provides ssh_key_name / ssh_key_file consumed by the compute instances.
module "ssh_settings" {
source = "../../modules/ssh-key-ref"
}
# Global variables (constants, really)
module "common_variables" {
source = "../../modules/common_variables"
}

View File

@ -0,0 +1,104 @@
#
# TimeScaleDB shared server
# Network
# Dedicated, non-routed network used both by the TimeScaleDB cluster
# members and by clients of the shared service. The DNS domain comes from
# the zone created in the project-setup stage.
resource "openstack_networking_network_v2" "timescaledb_net" {
name = var.timescaledb_net.network_name
admin_state_up = "true"
external = "false"
description = var.timescaledb_net.network_description
dns_domain = data.terraform_remote_state.privnet_dns_router.outputs.dns_zone.zone_name
mtu = module.common_variables.mtu_size
port_security_enabled = true
shared = false
region = module.common_variables.main_region
}
# Subnet
# Subnet of timescaledb-net. no_gateway keeps it isolated: instances get
# a second NIC here only to reach the service.
resource "openstack_networking_subnet_v2" "timescaledb_subnet" {
name = "timescaledb-subnet"
description = "subnet used to connect to the shared TimeScaleDB service"
network_id = openstack_networking_network_v2.timescaledb_net.id
cidr = var.timescaledb_net.network_cidr
dns_nameservers = module.common_variables.resolvers_ip
ip_version = 4
enable_dhcp = true
no_gateway = true
# Static IPs below the pool start are reserved for the DB servers.
allocation_pool {
start = var.timescaledb_net.allocation_pool_start
end = var.timescaledb_net.allocation_pool_end
}
}
# Security group
# Grants PostgreSQL/TimeScaleDB access only from the dedicated service
# network. Default egress/ingress rules are dropped on purpose.
resource "openstack_networking_secgroup_v2" "timescaledb_access" {
  name                 = "access_to_the_timescaledb_service"
  delete_default_rules = "true"
  description          = "Access the shared TimeScaleDB service using the dedicated network"
}

resource "openstack_networking_secgroup_rule_v2" "timescaledb_access_from_dedicated_subnet" {
  security_group_id = openstack_networking_secgroup_v2.timescaledb_access.id
  # FIX: build the description from the variable instead of hard-coding
  # 192.168.11.0/24, so it can never drift from remote_ip_prefix below.
  description       = "Allow connections to port 5432 from the ${var.timescaledb_net.network_cidr} network"
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 5432
  port_range_max    = 5432
  remote_ip_prefix  = var.timescaledb_net.network_cidr
}
# Server group used to spread the cluster nodes across hypervisors
# according to the configured (soft-anti-affinity by default) policy.
resource "openstack_compute_servergroup_v2" "timescaledb_cluster" {
name = "timescaledb_cluster"
policies = [var.timescaledb_affinity_policy]
}
# Instances with an additional block device
# TimeScaleDB cluster nodes: Ubuntu 22.04 boot volume plus a blank data
# volume, placed via the server group and attached to both the main
# private network and the dedicated timescaledb network.
resource "openstack_compute_instance_v2" "timescaledb_server" {
  count                   = var.timescaledb_nodes_count
  name                    = format("%s-%02d", var.timescaledb_server_data.node_name, count.index + 1)
  availability_zone_hints = module.common_variables.availability_zones_names.availability_zone_no_gpu
  image_name              = module.common_variables.ubuntu_2204.name
  flavor_name             = var.timescaledb_node_flavor
  key_pair                = module.ssh_settings.ssh_key_name
  security_groups         = [data.terraform_remote_state.privnet_dns_router.outputs.default_security_group_name, openstack_networking_secgroup_v2.timescaledb_access.name]
  scheduler_hints {
    group = openstack_compute_servergroup_v2.timescaledb_cluster.id
  }
  # Boot volume created from the Ubuntu 22.04 image; kept on termination.
  block_device {
    uuid                  = module.common_variables.ubuntu_2204.uuid
    source_type           = "image"
    volume_size           = 10
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = false
  }
  # Blank data volume for the database files.
  block_device {
    source_type           = "blank"
    volume_size           = var.timescaledb_server_data.node_data_disk_size
    boot_index            = -1
    destination_type      = "volume"
    delete_on_termination = false
  }
  network {
    name = data.terraform_remote_state.privnet_dns_router.outputs.main_private_network.name
  }
  network {
    name = var.timescaledb_net.network_name
    # FIX: plain list index instead of the legacy '.*' splat form
    # ("var.timescaledb_ip.* [count.index]"); same value, current syntax.
    fixed_ip_v4 = var.timescaledb_ip[count.index]
  }
  # Cloud-init user data shared by all Ubuntu 22.04 instances (the
  # redundant "${...}" wrapper around the file() argument was dropped).
  user_data  = file(module.common_variables.ubuntu_2204.user_data_file)
  depends_on = [openstack_networking_subnet_v2.timescaledb_subnet]
  # Do not replace the instance when the ssh key changes
  lifecycle {
    ignore_changes = [
      # Ignore changes to tags, e.g. because a management agent
      # updates these based on some ruleset managed elsewhere.
      key_pair, user_data, network
    ]
  }
}

View File

@ -0,0 +1,41 @@
# TimeScaleDB cluster: node base name and data-disk layout.
variable "timescaledb_server_data" {
type = map(string)
default = {
node_name = "timescaledb"
node_data_disk_size = 20
node_data_disk_device = "/dev/vdb"
}
}
# Server-group policy used to spread the cluster nodes.
variable "timescaledb_affinity_policy" {
default = "soft-anti-affinity"
}
# Flavor of the cluster nodes (set in the tfvars file).
variable "timescaledb_node_flavor" {
default = ""
}
# Number of cluster nodes; 0 means the cluster is not created.
variable "timescaledb_nodes_count" {
default = 0
}
# Fixed IPs of the cluster nodes on the timescaledb network, one per node.
variable "timescaledb_ip" {
type = list(string)
default = []
}
# Host CIDRs of the cluster nodes (for firewall rules).
variable "timescaledb_cidr" {
type = list(string)
default = []
}
# Definition of the dedicated TimeScaleDB network.
variable "timescaledb_net" {
type = map(string)
default = {
network_name = "timescaledb-net"
network_description = "Network used by the Timescaledb cluster and to access the service"
network_cidr = "192.168.11.0/24"
allocation_pool_start = "192.168.11.20"
allocation_pool_end = "192.168.11.254"
}
}

View File

@ -0,0 +1,190 @@
# Define required providers
# Pins the OpenStack provider series; Terraform >= 0.14 is required by
# every stack in this repository.
terraform {
required_version = ">= 0.14.0"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.53.0"
}
}
}
# Credentials are taken from the named cloud in clouds.yaml.
provider "openstack" {
# cloud = "s2i2s-proj"
cloud = "ISTI-Cloud"
}
# Global variables (constants, really) shared by all projects.
module "common_variables" {
source = "../../modules/common_variables"
}
# Main module
# Creates the S2I2S project's DNS zone, main private network/subnet and
# external router.
module "main_private_net_and_dns_zone" {
  source = "../../modules/main_private_net_and_dns_zone"
  dns_zone = {
    zone_name   = "s2i2s.cloud.isti.cnr.it."
    email       = "postmaster@isti.cnr.it"
    # FIX: the description said "d4s-production-cloud project", a leftover
    # from the file this stack was copied from.
    description = "DNS primary zone for the s2i2s-cloud project"
    ttl         = 8600
  }
  os_project_data = {
    # NOTE(review): this id is identical to the kdd-lab project id used
    # elsewhere in this repository — verify it is really the S2I2S
    # OpenStack project UUID.
    id = "1b45adf388934758b56d0dfdb4bfacf3"
  }
  main_private_network = {
    name        = "s2i2s-cloud-main"
    description = "S2I2S private network (use this as the main network)"
  }
  main_private_subnet = {
    name        = "s2i2s-production-cloud-main-subnet"
    description = "S2I2S main private subnet"
    cidr        = "10.11.0.0/21"
    gateway_ip  = "10.11.0.1"
    allocation_start = "10.11.1.1"
    allocation_end   = "10.11.7.254"
  }
  external_router = {
    name        = "s2i2s-cloud-external-router"
    description = "S2I2S main router"
  }
}
# Outputs of the main module, re-exported so that the other stacks of
# this project can read them through terraform_remote_state.
output "dns_zone_id" {
value = module.main_private_net_and_dns_zone.dns_zone_id
}
output "main_private_network_id" {
value = module.main_private_net_and_dns_zone.main_private_network_id
}
output "main_subnet_network_id" {
value = module.main_private_net_and_dns_zone.main_subnet_network_id
}
output "external_gateway_ip" {
value = module.main_private_net_and_dns_zone.external_gateway_ip
}
# Module used
# Constants from the common_variables module, re-exported for the same
# reason (region, external network, resolvers, MTU, availability zones).
output "main_region" {
value = module.common_variables.main_region
}
output "external_network" {
value = module.common_variables.external_network
}
output "external_network_id" {
value = module.common_variables.external_network.id
}
output "floating_ip_pools" {
value = module.common_variables.floating_ip_pools
}
output "resolvers_ip" {
value = module.common_variables.resolvers_ip
}
output "mtu_size" {
value = module.common_variables.mtu_size
}
output "availability_zones_names" {
value = module.common_variables.availability_zones_names
}
output "availability_zone_no_gpu_name" {
value = module.common_variables.availability_zones_names.availability_zone_no_gpu
}
output "availability_zone_with_gpu_name" {
value = module.common_variables.availability_zones_names.availability_zone_with_gpu
}
output "ssh_sources" {
value = module.common_variables.ssh_sources
}
output "networks_with_d4s_services" {
value = module.common_variables.networks_with_d4s_services
}
# Base images and their cloud-init user-data files.
output "ubuntu_1804" {
value = module.common_variables.ubuntu_1804
}
output "ubuntu_2204" {
value = module.common_variables.ubuntu_2204
}
output "centos_7" {
value = module.common_variables.centos_7
}
output "almalinux_9" {
value = module.common_variables.almalinux_9
}
output "ubuntu1804_data_file" {
value = module.common_variables.ubuntu1804_data_file
}
output "ubuntu2204_data_file" {
value = module.common_variables.ubuntu2204_data_file
}
output "el7_data_file" {
value = module.common_variables.el7_data_file
}
# Shared infrastructure services.
output "ssh_jump_proxy" {
value = module.common_variables.ssh_jump_proxy
}
output "internal_ca_data" {
value = module.common_variables.internal_ca_data
}
output "prometheus_server_data" {
value = module.common_variables.prometheus_server_data
}
output "shared_postgresql_server_data" {
value = module.common_variables.shared_postgresql_server_data
}
output "haproxy_l7_data" {
value = module.common_variables.haproxy_l7_data
}
output "resource_registry_addresses" {
value = module.common_variables.resource_registry_addresses
}
output "smartexecutor_addresses" {
value = module.common_variables.smartexecutor_addresses
}
#Added by Francesco
output "policy_list" {
value = module.common_variables.policy_list
}
#Added by Francesco
output "flavor_list" {
value = module.common_variables.flavor_list
}
#Added by Francesco
output "security_group_list" {
value = module.common_variables.security_group_list
}
#Added by Francesco
output "networks_list" {
value = module.common_variables.networks_list
}