In this section:
In the Google Cloud Platform, the Terraform module orchestrates the SBC HA pair with the following:
HFE 2.1 - Two HFE nodes, each with three interfaces
Refer to Configure HFE Nodes in GCP for details.
Prerequisites
Before executing the Terraform module, ensure that you:
Create a project in which the resources are kept.
Download the
accounts.json
file from your GCP account, and place it under the directory that contains the Terraform files. Refer to "Account Permissions for Terraform" on the page GCP Service Account Permissions for the required permissions. Upload the
HFE_GCE.sh
script to a bucket. For more information, refer to Create a Bucket in Cloud storage for HFE script upload. Create a Service Account for running the instances. For more information, refer to GCP Service Account Permissions.
Ensure the following files are present under the main source directory:
Do not create a router and a router-nat if they already exist for the VPC network/subnet. If a router and a router-nat containing all subnet ranges already exist, you cannot create new ones.
To create a new router and router-nat, specify different VPC networks/subnets to avoid an error.
Create SBC HA with HFE
SBC HA with HFE 2.1
To create an HA SBC with HFE 2.1, modify the terraform.tfvars
under the directory sbc_hahfe_split/
, and execute the Terraform command listed at Create SBC HA or Standalone with HFE and Terraform.
A screenshot of terraform.tfvars
is shown below. Modify the highlighted parameters depending on the user account/requirements. You may modify the values of the other parameters based on the deployment.
The descriptions of the parameters are available in variables.tf
.
terraform.tfvars for HA SBC With HFE 2.1:
project = "testdemo-215714"
region = "us-east1"
zone = "us-east1-c"
#resource config
service_account = "serviceaccount@testdemo-215714.iam.gserviceaccount.com"
#vpc resource
#While setting create_vpc/create_hfe_vpc to false, make sure the corresponding create_subnets/create_hfe_subnets is also set to false and provide the existing resource names for vpc/subnets/router.
create_vpc = "true"
create_hfe_vpc = "true"
vpc_names = ["tfmgt","tfha","tfpkt0","tfpkt1"]
vpc_names_hfe = ["tfhfepublic","tfhfeprivate"]
#subnet_creation resources
create_subnets = "true"
create_hfe_subnets = "true"
subnet_names = ["tfmgt-subnet","tfha-subnet","tfpkt0-subnet","tfpkt1-subnet"]
subnet_names_hfe = ["tfhfepublic-subnet","tfhfeprivate-subnet"]
subnet_ip_cidr_range = ["10.10.30.0/24","10.10.40.0/24","10.10.50.0/24","10.10.60.0/24"]
subnet_ip_cidr_range_hfe = ["10.10.10.0/24","10.10.20.0/24"]
private_ip_google_access = ["true","false","true","false"]
private_ip_google_access_hfe = ["true","true"]
# firewall resources
create_fw = "true"
ingress_fw_names = ["tf-fwing-mgt0-tcp", "tf-fwing-mgt0-udp", "tf-fwing-ha-tcp", "tf-fwing-pkt0-tcp", "tf-fwing-pkt0-udp", "tf-fwing-pkt1-tcp", "tf-fwing-pkt1-udp"]
egress_fw_names = ["tf-fwegr-mgt0", "tf-fwegr-ha", "tf-fwegr-pkt0", "tf-fwegr-pkt1"]
ingress_fw_names_hfe = ["tf-fwing-hfepub-tcp", "tf-fwing-hfepub-udp", "tf-fwing-hfepriv-tcp", "tf-fwing-hfepriv-udp"]
egress_fw_names_hfe = ["tf-fwegr-hfepub", "tf-fwegr-hfepriv"]
ingress_allow_protocol = "all"
ingress_allow_ports = []
egress_allow_protocol = "all"
egress_allow_ports = []
ingress_allow_protocol1 = "tcp"
ingress_allow_protocol2 = "udp"
ingress_allow_protocol3 = "icmp"
ingress_allow_ports1_mgt = ["22", "2022", "2024", "80", "443", "444"]
ingress_allow_ports2_mgt = ["123", "161", "162", "3054", "3055", "3057", "5093"]
ingress_allow_ports1_pkt = ["5061"]
ingress_allow_ports2_pkt = ["5060"]
ingress_allow_ports_pkt_custom_udp = ["1024-65535"]
ingress_fw_name_pkt0_custom_udp = "tfsbc-fwing-pkt0-custom-udp"
ingress_fw_name_pkt1_custom_udp = "tfsbc-fwing-pkt1-custom-udp"
ingress_fw_name_pkt0_icmp = "sbc-fwing-pkt0-icmp"
ingress_fw_name_pkt1_icmp = "sbc-fwing-pkt1-icmp"
source_ranges_mgt = [ "0.0.0.0/0"]
source_ranges_pkt0 = [ "0.0.0.0/0"]
source_ranges_pkt1 = [ "10.10.60.0/24", "10.10.20.0/24"]
destination_ranges = [ "0.0.0.0/0"]
#eips resources
create_eip_mgt = "true"
mgt_eip_name_list = ["tfmgt0-active-eip","tfmgt0-standby-eip","tfhfe-pkt0-mgt-eip","tfhfe-pkt1-mgt-eip"]
create_eip_pkt0 = "false"
pkt0_eip_name_list = [ ]
create_eip_pkt1 = "false"
pkt1_eip_name_list = [ ]
create_eip_hfe_pkt0 = "true"
hfe_pkt0_eip_name_list = [ "tfhfe-sig-eip"]
#private IP resources
create_privateip = "true"
sbc_active_privateips_name_list = ["mgt-active-ip", "ha-active-ip", "pkt0-active-ip", "pkt1-active-ip" ]
sbc_standby_privateips_name_list = ["mgt-standby-ip", "ha-standby-ip", "pkt0-standby-ip", "pkt1-standby-ip" ]
hfe_pkt0_privateips_name_list = ["hfe-pkt0-eth0-ip", "hfe-pkt0-eth1-ip", "hfe-pkt0-eth2-ip"]
hfe_pkt1_privateips_name_list = ["hfe-pkt1-eth0-ip", "hfe-pkt1-eth1-ip", "hfe-pkt1-eth2-ip"]
sbc_active_pktaliasips_name_list = ["pkt0-active-alias", "pkt1-active-alias"]
sbc_standby_pktaliasips_name_list = ["pkt0-standby-alias", "pkt1-standby-alias"]
# key_pair resources
#If generate_ssh_key set to true then no need of ssh_key_path and ssh_key_name
ssh_key_path = "generated_ssh_keys"
ssh_key_name = "sbc_hfe_key"
ssh_key_name_admin = "sbc_admin_key"
generate_ssh_key = "true"
generate_ssh_key_admin = "true"
# need to set this only when generate_ssh_key is set to false to set ssh key for ssh_user_name user
ssh_user_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOXBFYAd6vFwgYuO445XN5WQn3iEuL8BCz2Uom9wWHuTv2bpU/x7hGTmVZ/j76I5y3WQQGX7jsQwDAq+HvxHUtXODoj4jQN84Dr73JgBICiESRBY5MA2ypOPz1H/UpQ4lfXPCsNA+idcQo8GUG6XBQ+8K4JED5ZONzQ2t4PoFt8JVpeUPkx2O8v9v6lyXbCz/uQeTV0EbcdwgH9kLEXq5rHlaJKUjlusrMJs7SBOawICJEu4IY2EE4mH8PlMCuqD1alTXogycGA5Rh9ChXpjXOvXZ688KRckFNHC0pE5a6hebtTLFonQIS6qJ++CDUQxM85B+O6fFDHft/wiZB/jzN"
# need to set this only when generate_ssh_key_admin is set to false to set ssh key for ssh_user_name admin
admin_ssh_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXhoIvn0yAQqy2qV0yEP8G4tXP4F4vmEx+IbjT4Alke41oY34j2xfm3eDw/s6CJCdlYOwPli0M9rKbOZVfsVyFRke5oHkcWBsU7WuryBT5z/uba6j2wQiXjN6rvvvJ3SpzYotvhBlS98US0QwRsKQBgakrOASKdsGmtJxZ1RWOg1NGuhtZ8Q6LLEIt2xxHSlC+gbW7glzARbmma6yg4t+bnTybtqfffZRSQCs6qmHFHK1mI9LUY3yCRl7GYYyQd9Bkv5uUJaSZ4I91fItYPnVGia27MrZair5T4z4Cgorzuzilq91lZ/JPi2zSozeOter8mmYe/YRIyavnEWMjIdlx"
#route resources
route_names = ["hferoute-pkt0","hferoute-pkt1"]
route_priority = "1000"
route_destination_range = "0.0.0.0/0"
#router resources
create_router = "true"
router_name = "router1"
create_router_nat = "true"
router_nat_name = "routernat1"
# template file user data variable
ce_name_active = "vsbc1"
ce_name_standby = "vsbc2"
sbc_personality = "isbc"
rnat_pkt0 = "True"
rnat_pkt1 = "True"
system_name = "vsbcSystem"
third_party_cpu_allocation = "0"
third_party_memory_allocation = "0"
mgt_prefix = "24"
sbc_ha_mode = "1to1"
#sbc/hfe instance resources
use_node_affinity = "true"
create_node_affinity = "true"
node_template_name = "node-template-1"
node_group_name = ["node-group-1", "node-group-2"]
node_type = "n1-node-96-624"
number_of_nodes = "1"
instance_name_hfe_pkt0 = "hfepkt0"
instance_name_hfe_pkt1 = "hfepkt1"
instance_name_sbc_active = "sbc1"
instance_name_sbc_standby = "sbc2"
image_hfe = "ubuntu-1910"
image_sbc = "release-sbc-v07-02-03s400-01-23-20-15-14"
image_project = "imagesharingproject"
machine_type_sbc = "custom-8-30720"
machine_type_hfe = "custom-6-15360"
processor_type_sbc = "Intel Broadwell"
disk_type = "pd-ssd"
sbc_disk_size = "65"
hfe_disk_size = "10"
sbc_gpu_type = "nvidia-tesla-v100"
sbc_gpu_number = "1"
use_gpu = "false"
remote_ssh_ip = "208.45.178.4"
hfe_script_url = "<url>"
sbc_tag_name = "ribbon-sbc"
ssh_user_name_sbc = "linuxadmin"
ssh_user_name_hfe = "ubuntu"
Create Standalone SBC
To create a Standalone SBC, modify the terraform.tfvars
under the directory sbc_stand_alone/
, and execute the Terraform command listed at Create SBC HA or Standalone with HFE and Terraform.
A screenshot of terraform.tfvars
is shown below.
Modify the highlighted parameters based on the user account/requirements. You may modify values for other parameters based on the deployment. The descriptions of the parameters are in variables.tf
.
terraform.tfvars for Standalone SBC:
#provider
region = "us-east1"
project = "testdemo-215714"
zone = "us-east1-c"
#resource config
service_account = "serviceaccount@testdemo-215714.iam.gserviceaccount.com"
# vpc resource
create_vpc = "true"
vpc_names = ["sbc-mgt0-test1", "sbc-ha-test1", "sbc-pkt0-test1", "sbc-pkt1-test1"]
# subnet_creation resources
create_subnets = "true"
subnet_names = ["sbc-subnet-mgt0", "sbc-subnet-ha", "sbc-subnet-pkt0", "sbc-subnet-pkt1"]
subnet_ip_cidr_range = ["10.10.30.0/24", "10.10.40.0/24", "10.10.50.0/24", "10.10.60.0/24"]
private_ip_google_access = ["true", "false", "true", "false"]
# firewall resources
create_fw = "true"
ingress_allow_protocol1 = "tcp"
ingress_allow_protocol2 = "udp"
ingress_allow_ports1_mgt = ["22", "2022", "2024", "80", "443", "444"]
ingress_allow_ports2_mgt = ["123", "161", "162", "3054", "3055", "3057", "5093"]
ingress_allow_ports1_pkt = ["5061"]
ingress_allow_ports2_pkt = ["5060"]
ingress_allow_ports_pkt_custom_udp = ["1024-65535"]
egress_allow_protocol = "all"
ingress_fw_names = ["sbc-fwing-mgt0-tcp", "sbc-fwing-mgt0-udp", "sbc-fwing-ha", "sbc-fwing-pkt0-tcp", "sbc-fwing-pkt0-udp", "sbc-fwing-pkt1-tcp", "sbc-fwing-pkt1-udp"]
ingress_fw_name_pkt0_custom_udp = "sbc-fwing-pkt0-custom-udp"
ingress_fw_name_pkt1_custom_udp = "sbc-fwing-pkt1-custom-udp"
egress_fw_names = ["sbc-fwegr-mgt0", "sbc-fwegr-ha", "sbc-fwegr-pkt0", "sbc-fwegr-pkt1"]
source_ranges = ["0.0.0.0/0"]
destination_ranges = ["0.0.0.0/0"]
priority = "1000"
# template file user data variable
ce_name = "vsbc1"
sbc_personality = "isbc"
rnat_pkt0 = "True"
rnat_pkt1 = "False"
system_name = "vsbcSystem"
third_party_cpu_allocation = "0"
third_party_memory_allocation = "0"
mgt_prefix = "24"
# key_pair resources
#If generate_ssh_key set to true then no need of ssh_key_path and ssh_key_name
ssh_key_path = "generated_ssh_keys"
ssh_key_name = "sbc_key"
generate_ssh_key = "true"
# need to set this only when generate_ssh_key is set to false to set ssh key for ssh_user_name user
ssh_user_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOXBFYAd6vFwgYuO445XN5WQn3iEuL8BCz2Uom9wWHuTv2bpU/x7hGTmVZ/j76I5y3WQQGX7jsQwDAq+HvxHUtXODoj4jQN84Dr73JgBICiESRBY5MA2ypOPz1H/UpQ4lfXPCsNA+idcQo8GUG6XBQ+8K4JED5ZONzQ2t4PoFt8JVpeUPkx2O8v9v6lyXbCz/uQeTV0EbcdwgH9kLEXq5rHlaJKUjlusrMJs7SBOawICJEu4IY2EE4mH8PlMCuqD1alTXogycGA5Rh9ChXpjXOvXZ688KRckFNHC0pE5a6hebtTLFonQIS6qJ++CDUQxM85B+O6fFDHft/wiZB/jzN"
# sbc instance resources
use_node_affinity = "true"
create_node_affinity = "true"
node_template_name = "node-template-1"
node_group_name = ["node-group-1"]
node_type = "n1-node-96-624"
number_of_nodes = "1"
instance_name_sbc = "sbc-standalone-test"
processor_type_sbc = "Intel Broadwell"
disk_size_sbc = "65"
image_sbc = "release-sbc-v07-02-01s400-08-30-19-14-40"
disk_type_sbc = "pd-ssd"
machine_type_sbc = "n1-standard-4"
image_project = "imagesharingproject"
ssh_user_name = "linuxadmin"
sbc_tag_name = "ribbon-sbc"
admin_ssh_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOXBFYAd6vFwgYuO445XN5WQn3iEuL8BCz2Uom9wWHuTv2bpU/x7hGTmVZ/j76I5y3WQQGX7jsQwDAq+HvxHUtXODoj4jQN84Dr73JgBICiESRBY5MA2ypOPz1H/UpQ4lfXPCsNA+idcQo8GUG6XBQ+8K4JED5ZONzQ2t4PoFt8JVpeUPkx2O8v9v6lyXbCz/uQeTV0EbcdwgH9kLEXq5rHlaJKUjlusrMJs7SBOawICJEu4IY2EE4mH8PlMCuqD1alTXogycGA5Rh9ChXpjXOvXZ688KRckFNHC0pE5a6hebtTLFonQIS6qJ++CDUQxM85B+O6fFDHft/wiZB/jzN"
sbc_gpu_type = "nvidia-tesla-k80"
sbc_gpu_number = "2"
use_gpu = "false"
#eips resources
create_eip_mgt = "true"
mgt_eip_name_list = ["mgt0-stand-alone-eip"]
create_eip_pkt0 = "true"
pkt0_eip_name_list = []
create_eip_pkt1 = "false"
pkt1_eip_name_list = []
#private IP resources
create_privateip = "true"
sbc_stand_alone_privateips_name_list = ["mgt-stand-alone-ip", "ha-stand-alone-ip", "pkt0-stand-alone-ip", "pkt1-stand-alone-ip"]
sbc_stand_alone_pktaliasips_name_list = ["pkt0-stand-alone-alias", "pkt1-stand-alone-alias"]
Execute Terraform Command
After all the parameters are updated in the previously chosen configuration, execute the following commands in order:
terraform init
terraform validate
terraform plan
terraform apply

To tear down the deployment when it is no longer needed, run:
terraform destroy