# modules/complete_infra_setup/octavia.tf
# Main load balancer. L4, backed by Octavia
# Create the netework and subnet used to make Octavia and HAPROXY communicate
resource "openstack_networking_network_v2" "octavia-private-network" {
  name        = var.octavia_information.network_name
  description = var.octavia_information.network_description
  # Use real booleans instead of the quoted strings ("true"/"false") the
  # provider merely tolerates.
  admin_state_up        = true
  external              = false
  shared                = false
  port_security_enabled = true
  dns_domain            = data.terraform_remote_state.privnet_dns_router.outputs.dns_zone.name
  mtu                   = module.labs_common_variables.mtu_size
  region                = module.labs_common_variables.main_region
  # tenant_id = data.terraform_remote_state.privnet_dns_router.outputs.os_project_data.id
}
# IPv4 subnet of the Octavia/HAPROXY private network.
resource "openstack_networking_subnet_v2" "octavia-private-subnet" {
  name            = var.octavia_information.subnet_name
  description     = var.octavia_information.subnet_description
  network_id      = openstack_networking_network_v2.octavia-private-network.id
  cidr            = var.octavia_information.subnet_cidr
  gateway_ip      = var.octavia_information.gateway_ip
  ip_version      = 4
  enable_dhcp     = true
  dns_nameservers = module.labs_common_variables.resolvers_ip
  # tenant_id = data.terraform_remote_state.privnet_dns_router.outputs.os_project_data.id

  # Addresses DHCP is allowed to hand out on this subnet.
  allocation_pool {
    start = var.octavia_information.allocation_pool_start
    end   = var.octavia_information.allocation_pool_end
  }
}
# Router connecting the Octavia private network to the external network.
resource "openstack_networking_router_v2" "octavia-external-router" {
  name                = var.octavia_information.external_router_name
  description         = var.octavia_information.external_router_description
  external_network_id = module.labs_common_variables.external_network.id
  enable_snat         = true
  # tenant_id = data.terraform_remote_state.privnet_dns_router.outputs.os_project_data.id

  vendor_options {
    set_router_gateway_after_create = true
  }
}
# Router interface configuration
# Attach the private subnet to the external router.
resource "openstack_networking_router_interface_v2" "octavia-private-network-routing" {
  subnet_id = openstack_networking_subnet_v2.octavia-private-subnet.id
  router_id = openstack_networking_router_v2.octavia-external-router.id
  # router_id = var.external_router.id
}
# The Octavia (amphora-backed) L4 load balancer itself.
resource "openstack_lb_loadbalancer_v2" "main_lb" {
  name                  = var.octavia_information.main_lb_name
  description           = var.octavia_information.main_lb_description
  flavor_id             = var.octavia_information.octavia_flavor_id
  loadbalancer_provider = "amphora"
  vip_subnet_id         = openstack_networking_subnet_v2.octavia-private-subnet.id
  vip_address           = var.basic_services_ip.octavia_main
  availability_zone     = module.labs_common_variables.availability_zones_names.availability_zone_no_gpu
}
# Allocate a floating IP
# Floating IP to expose the load balancer publicly.
resource "openstack_networking_floatingip_v2" "main_lb_ip" {
  pool        = var.floating_ip_pools.main_public_ip_pool
  description = var.octavia_information.main_lb_description
  # The DNS association does not work because of a bug in the OpenStack API
  # dns_name = "main-lb"
  # dns_domain = var.dns_zone.zone_name
}
# Bind the floating IP to the load balancer's VIP port.
resource "openstack_networking_floatingip_associate_v2" "main_lb" {
  port_id     = openstack_lb_loadbalancer_v2.main_lb.vip_port_id
  floating_ip = openstack_networking_floatingip_v2.main_lb_ip.address
}
locals {
  # FQDN of the main load balancer inside the managed DNS zone.
  recordset_name = "${var.octavia_information.main_lb_hostname}.${data.terraform_remote_state.privnet_dns_router.outputs.dns_zone.name}"
}
# A record pointing the LB hostname at its floating IP.
resource "openstack_dns_recordset_v2" "main_lb_dns_recordset" {
  zone_id     = data.terraform_remote_state.privnet_dns_router.outputs.dns_zone.id
  name        = local.recordset_name
  description = "Public IP address of the main Octavia load balancer"
  type        = "A"
  # NOTE(review): 8600 looks like a possible typo for 3600 (1h) or 86400 (24h) — confirm intent.
  ttl     = 8600
  records = [openstack_networking_floatingip_v2.main_lb_ip.address]
}
# Main HAPROXY stats listener
resource "openstack_lb_listener_v2" "main_haproxy_stats_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  protocol        = "TCP"
  protocol_port   = 8880
  description     = "Listener for the stats of the main HAPROXY instances"
  name            = "main_haproxy_stats_listener"
  # Stats are restricted to the VPN source ranges only.
  allowed_cidrs = [var.ssh_sources.d4s_vpn_1_cidr, var.ssh_sources.d4s_vpn_2_cidr, var.ssh_sources.s2i2s_vpn_1_cidr, var.ssh_sources.s2i2s_vpn_2_cidr]
  # Explicit for consistency with the HTTP/HTTPS listeners below
  # (true is already the provider default, so this is a no-op).
  admin_state_up = true
}
resource "openstack_lb_pool_v2" "main_haproxy_stats_pool" {
  listener_id = openstack_lb_listener_v2.main_haproxy_stats_listener.id
  protocol    = "TCP"
  lb_method   = "LEAST_CONNECTIONS"
  name        = "main-haproxy-lb-stats"
  description = "Pool for the stats of the main HAPROXY instances"
  persistence {
    type = "SOURCE_IP"
  }
  # Explicit for consistency with the HTTP/HTTPS pools below
  # (true is already the provider default, so this is a no-op).
  admin_state_up = true
}
# The two HAPROXY L7 instances serving the stats endpoint.
resource "openstack_lb_members_v2" "main_haproxy_stats_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_stats_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 8880
  }

  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 8880
  }
}
# Plain TCP health check for the stats pool members.
resource "openstack_lb_monitor_v2" "main_haproxy_stats_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_stats_pool.id
  name           = "main_haproxy_stats_monitor"
  type           = "TCP"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Main HAPROXY HTTP
# Port 80 passthrough to the HAPROXY instances.
resource "openstack_lb_listener_v2" "main_haproxy_http_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  name            = "main_haproxy_http_listener"
  description     = "HTTP listener of the main HAPROXY instances"
  protocol        = "TCP"
  protocol_port   = 80
  admin_state_up  = true
}
# PROXY-protocol (v2) pool so HAPROXY sees the real client address.
resource "openstack_lb_pool_v2" "main_haproxy_http_pool" {
  listener_id    = openstack_lb_listener_v2.main_haproxy_http_listener.id
  name           = "main-haproxy-lb-http"
  description    = "Pool for the HTTP listener of the main HAPROXY instances"
  protocol       = "PROXYV2"
  lb_method      = "LEAST_CONNECTIONS"
  admin_state_up = true

  persistence {
    type = "SOURCE_IP"
  }
}
# The two HAPROXY L7 instances serving HTTP.
resource "openstack_lb_members_v2" "main_haproxy_http_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_http_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 80
  }

  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 80
  }
}
# HTTP health check hitting HAPROXY's dedicated health endpoint.
resource "openstack_lb_monitor_v2" "main_haproxy_http_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_http_pool.id
  name           = "main_haproxy_http_monitor"
  type           = "HTTP"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# Main HAPROXY HTTPS
# Port 443 passthrough to the HAPROXY instances (TLS terminates at HAPROXY).
resource "openstack_lb_listener_v2" "main_haproxy_https_listener" {
  loadbalancer_id = openstack_lb_loadbalancer_v2.main_lb.id
  name            = "main_haproxy_https_listener"
  description     = "HTTPS listener of the main HAPROXY instances"
  protocol        = "TCP"
  protocol_port   = 443
  admin_state_up  = true
}
# PROXY-protocol (v2) pool so HAPROXY sees the real client address.
resource "openstack_lb_pool_v2" "main_haproxy_https_pool" {
  listener_id    = openstack_lb_listener_v2.main_haproxy_https_listener.id
  name           = "main-haproxy-lb-https"
  description    = "Pool for the HTTPS listener of the main HAPROXY instances"
  protocol       = "PROXYV2"
  lb_method      = "LEAST_CONNECTIONS"
  admin_state_up = true

  persistence {
    type = "SOURCE_IP"
  }
}
# The two HAPROXY L7 instances serving HTTPS.
resource "openstack_lb_members_v2" "main_haproxy_https_pool_members" {
  pool_id = openstack_lb_pool_v2.main_haproxy_https_pool.id

  member {
    name          = "haproxy l7 1"
    address       = var.basic_services_ip.haproxy_l7_1
    protocol_port = 443
  }

  member {
    name          = "haproxy l7 2"
    address       = var.basic_services_ip.haproxy_l7_2
    protocol_port = 443
  }
}
# HTTPS health check hitting HAPROXY's dedicated health endpoint.
resource "openstack_lb_monitor_v2" "main_haproxy_https_monitor" {
  pool_id        = openstack_lb_pool_v2.main_haproxy_https_pool.id
  name           = "main_haproxy_https_monitor"
  type           = "HTTPS"
  http_method    = "GET"
  url_path       = "/_haproxy_health_check"
  expected_codes = "200"
  delay          = 20
  timeout        = 5
  max_retries    = 3
  admin_state_up = true
}
# VIP address of the main load balancer, exported for other modules.
output "main_loadbalancer_ip" {
  description = "Main Load balancer IP address"
  value       = openstack_lb_loadbalancer_v2.main_lb.vip_address
}