Chris Mague há 4 anos atrás
pai
commit
33f7d889bf

+ 8 - 0
.gitignore

@@ -1,3 +1,11 @@
+cloud-tmux
 .terraform/
 terraform.tfstate*
 cloud-tmux
+ansible/*.retry
+inventories/*.ini
+ansible/raft_group_setup
+ansible/roles/*

+ 35 - 0
README.md

@@ -16,3 +16,38 @@ sed -e s/\"//g >> ~/.ssh/maguec1.pem
 chmod 0600 ~/.ssh/maguec1.pem
 ```
 
+# After ansible
+
+```
+# download roles
+cd ansible && rm -rf roles/* && ansible-galaxy install --roles-path roles -r requirements.yml
+```
+
+
+```
+ansible-playbook ansible/cp_nodes.yml --private-key ~/.ssh/maguec1.pem -i inventories/maguec1_all_nodes.ini
+```
+
+```
+ansible-playbook ansible/test_nodes.yml --private-key ~/.ssh/maguec1.pem -i inventories/tester.ini 
+```
+
+## Resetting 
+
+On each node run:
+
+```
+for i in 1990{1..9} 1991{0..2}; do   sudo systemctl daemon-reload;   sudo systemctl stop redis-${i};   sudo rm -rf /redis/persistant/cpredis/199*.*;   sudo truncate -s 0 /var/log/cpredis/*.log;   sudo systemctl start redis-${i}; done
+```
+
+On node1 run:
+
+```
+~/raft_group_setup_3 or _5 depending on the size
+```
+
+On the tester node to reset the twemproxy:
+
+```
+ sudo systemctl stop twemproxy-template.service && sudo systemctl stop twemproxy && sudo rm -rf /etc/nutcracker.yml && sudo systemctl start twemproxy-template.service
+```

+ 6 - 0
ansible/README.md

@@ -0,0 +1,6 @@
+# Resetting
+
+for i in 1991{0..2} 1990{1..9}; do sudo systemctl stop redis-${i}; done
+sudo rm  -f /redis/persistant/cpredis/*.*
+for i in 1991{0..2} 1990{1..9}; do sudo systemctl start redis-${i}; done
+

+ 141 - 0
ansible/cp_nodes.yml

@@ -0,0 +1,141 @@
+---
+
+- hosts: all
+  become: yes
+  become_user: root
+  become_method: sudo
+  gather_facts: yes
+
+  vars:
+    consul_ui: true
+    consul_server: true
+    consul_server_nodes:
+      - 10.161.8.1
+      - 10.161.16.94
+      - 10.161.13.176
+      - 10.161.14.88
+      - 10.161.18.146
+    redis_ports:
+      - 19901
+      - 19902
+      - 19903
+      - 19904
+      - 19905
+      - 19906
+      - 19907
+      - 19908
+      - 19909
+      - 19910
+      - 19911
+      - 19912
+
+  handlers:
+    - name: restart_consul
+      service:
+        name: consul
+        state: restarted
+    - name: reload_redis_services
+      command: systemctl daemon-reload
+    - name: restart_redis_services
+      systemd:
+        name: "redis-{{ item }}"
+        state: restarted
+      with_items: "{{ redis_ports }}"
+  
+  
+  
+  pre_tasks:
+    - name: Update Apt Cache
+      apt: update_cache=yes cache_valid_time=86400
+      when: ansible_os_family == "Debian"
+    - name: Ubuntu Packages
+      apt: >
+        pkg={{item}}
+        state=present
+      with_items:
+        - build-essential
+        - autoconf
+        - automake
+        - libbsd-dev
+        - libltdl-dev
+        - libltdl7
+        - libtool
+  tasks:
+    - name: create redis user
+      user:
+        name: redis
+        uid: 4001
+        shell: /bin/bash
+        state: present
+    - name: copy the redis raft library
+      copy:
+        src: redisraft.so
+        dest: /usr/lib/redisraft.so
+        owner: redis
+        group: redis
+        mode: '0755'
+      notify:
+        - restart_redis_services
+    - name: create raft directories
+      file:
+        path: /redis/persistant/cpredis
+        owner: redis
+        group: redis
+        mode: '0755'
+        state: directory
+    - name: create raft directories
+      file:
+        path: /var/log/cpredis
+        owner: redis
+        group: redis
+        mode: '0755'
+        state: directory
+    - name: checkout redis git repo
+      git:
+        repo: 'https://github.com/antirez/redis'
+        dest: /home/redis/redis
+        version: 6.0
+    - name: Build the default target
+      make:
+        chdir: /home/redis/redis
+    - name: Run 'install' target as root
+      make:
+        chdir: /home/redis/redis
+        target: install
+      become: yes
+
+    - name: Redis Systemd 
+      template:
+        src: systemd_redis.j2
+        dest: /lib/systemd/system/redis-{{item}}.service
+        owner: root
+        group: root
+        mode: 0644
+      with_items: "{{ redis_ports }}"
+      notify:
+        - reload_redis_services
+        - restart_redis_services
+
+    - name: Enable Redis Services
+      systemd:
+        name: redis-{{item}}
+        enabled: yes
+        state: started
+      with_items: "{{ redis_ports }}"
+
+  roles:
+    - consul
+    - ntp
+
+  post_tasks:
+    - name: Redis Consul Services 
+      template:
+        src: redis.services.j2
+        dest: /opt/consul/consul.d/redis.json
+        owner: consul
+        group: consul
+        mode: 0644
+      notify:
+        - restart_consul
+
+

BIN
ansible/files/redisraft.so


+ 24 - 0
ansible/files/twemproxy.ctmpl

@@ -0,0 +1,24 @@
+cpredis:
+  listen: 0.0.0.0:2112
+  hash: fnv1a_64
+  distribution: ketama
+  timeout: 1000
+  preconnect: true
+  redis: true
+  auto_eject_hosts: false
+  server_retry_timeout: 2000
+  backlog: 2048
+  server_connections: 10
+  servers:
+{{ range service "redis-19901" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19902" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19903" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19904" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19905" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19906" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19907" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19908" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19909" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19910" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19911" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}
+{{ range service "redis-19912" }}    - {{ .Address }}:{{ .Port }}:1{{ end}}

+ 7 - 0
ansible/requirements.yml

@@ -0,0 +1,7 @@
+- src: https://github.com/idealista/consul_role.git
+  version: 1.0.0
+  name: consul
+- src: geerlingguy.ntp
+  name: ntp
+- src: https://github.com/griggheo/ansible-consul-template
+  name: consul-template

+ 9 - 0
ansible/templates/redis.services.j2

@@ -0,0 +1,9 @@
+{"services": [
+{% for port in redis_ports -%}
+  {"name": "redis-{{ port }}",
+  "port": {{ port }},
+  "checks": [{"script": "redis-cli -p {{ port }} --raw RAFT.INFO  |grep -q '^role:leader'", "interval": "0.5s"}]
+  }{% if port != redis_ports[-1] %},{% endif %}
+{% endfor -%}
+]}
+

+ 22 - 0
ansible/templates/systemd_redis.j2

@@ -0,0 +1,22 @@
+#####################################################################
+# {{ ansible_managed }}
+# {{ item }}
+
+#####################################################################
+[Unit]
+Description=Redis Server Port {{ item }}
+After=network.target
+
+[Service]
+WorkingDirectory=/redis/persistant/cpredis
+Type=simple
+LimitNOFILE=65535
+User=redis
+ExecStart=/usr/local/bin/redis-server --port {{ item }} --protected-mode no --dbfilename {{ item }}-redis.rdb --loadmodule /usr/lib/redisraft.so addr={{ ansible_default_ipv4.address }}:{{ item }} raft-log-fsync=no follower-proxy=yes raft-log-filename={{ item }}-raftlog.db
+StandardOutput=file:/var/log/cpredis/redis-{{ item }}.log
+StandardError=file:/var/log/cpredis/redis-{{ item }}-error.log
+
+
+[Install]
+WantedBy=multi-user.target
+Alias=redis-{{ item }}.service

+ 21 - 0
ansible/templates/systemd_twemproxy-template.j2

@@ -0,0 +1,21 @@
+#####################################################################
+# {{ ansible_managed }}
+
+#####################################################################
+[Unit]
+Description=Redis Twemproxy Template
+After=network.target
+
+[Service]
+WorkingDirectory=/redis
+Type=simple
+LimitNOFILE=65535
+User=root
+ExecStart=/opt/consul-template/bin/consul-template -consul="{{ consul_server_nodes[0] }}:8500" -template '/etc/nutcracker.ctmpl:/etc/nutcracker.yml:/bin/systemctl restart twemproxy.service'
+StandardOutput=file:/var/log/consul-template-twemproxy.log
+StandardError=file:/var/log/consul-template-twemproxy-error.log
+
+
+[Install]
+WantedBy=multi-user.target
+Alias=twemproxy-template.service

+ 19 - 0
ansible/templates/systemd_twemproxy.j2

@@ -0,0 +1,19 @@
+#####################################################################
+# {{ ansible_managed }}
+
+#####################################################################
+[Unit]
+Description=Redis Twemproxy
+After=network.target  twemproxy-template.service
+
+[Service]
+WorkingDirectory=/redis
+Type=simple
+LimitNOFILE=65535
+User=redis
+ExecStart=/usr/local/sbin/nutcracker -c /etc/nutcracker.yml -m 163840
+
+
+[Install]
+WantedBy=multi-user.target
+Alias=twemproxy.service

+ 165 - 0
ansible/test_nodes.yml

@@ -0,0 +1,165 @@
+---
+
+- hosts: all
+  become: yes
+  become_user: root
+  become_method: sudo
+  gather_facts: yes
+
+  vars:
+    consul_server_nodes:
+      - 10.161.8.1
+      - 10.161.16.94
+      - 10.161.13.176
+
+  pre_tasks:
+    - name: Update Apt Cache
+      apt: update_cache=yes cache_valid_time=86400
+      when: ansible_os_family == "Debian"
+    - name: Ubuntu Packages
+      apt: >
+        pkg={{item}}
+        state=present
+      with_items:
+        - build-essential
+        - autoconf
+        - automake
+        - libbsd-dev
+        - libltdl-dev
+        - libltdl7
+        - libtool
+        - libevent-openssl-2.1-6
+        - libpcre3-dev 
+        - libevent-dev
+        - pkg-config
+        - zlib1g-dev
+        - libssl-dev
+
+  tasks:
+    - name: create redis user
+      user:
+        name: redis
+        uid: 4001
+        shell: /bin/bash
+        state: present
+
+    - name: checkout redis git repo
+      git:
+        repo: 'https://github.com/antirez/redis'
+        dest: /home/redis/redis
+        version: 6.0
+
+    - name: Build the default target
+      make:
+        chdir: /home/redis/redis
+
+    - name: Run 'install' target as root
+      make:
+        chdir: /home/redis/redis
+        target: install
+      become: yes
+
+    - name: checkout memtier_benchmark git repo
+      git:
+        repo: 'https://github.com/RedisLabs/memtier_benchmark'
+        dest: /home/redis/memtier_benchmark
+        version: 1.2.17
+
+    - name: Run the autoconf
+      command: autoreconf -ivf
+      args:
+        chdir: /home/redis/memtier_benchmark
+        creates: /home/redis/memtier_benchmark/configure
+
+    - name: Run the configure script first
+      command: ./configure
+      args:
+        chdir: /home/redis/memtier_benchmark
+        creates: /home/redis/memtier_benchmark/Makefile
+
+    - name: Build the default target
+      make:
+        chdir: /home/redis/memtier_benchmark
+
+    - name: Run 'memtier_benchmark install' target as root
+      make:
+        chdir: /home/redis/memtier_benchmark
+        target: install
+      become: yes
+
+    - name: checkout twemproxy git repo
+      git:
+        repo: 'https://github.com/twitter/twemproxy'
+        dest: /home/redis/twemproxy
+        version: master
+
+    - name: Run the autoconf in twemproxy
+      command: autoreconf -ivf
+      args:
+        chdir: /home/redis/twemproxy
+        creates: /home/redis/twemproxy/configure
+
+    - name: Run the twemproxy configure
+      command: ./configure
+      args:
+        chdir: /home/redis/twemproxy
+        creates: /home/redis/twemproxy/Makefile
+
+    - name: Build the twemproxy target
+      make:
+        chdir: /home/redis/twemproxy
+
+    - name: Run 'twemproxy install' target as root
+      make:
+        chdir: /home/redis/twemproxy
+        target: install
+      become: yes
+  roles:
+    - ntp
+      #    - { role: consul-template,
+      #        consul_template_consul_server: 10.161.8.1,
+      #        consul_template_use_upstart: false }
+
+  post_tasks:
+    - name: create the redis dir
+      file:
+        path: /redis
+        owner: redis
+        group: redis
+        mode: '0755'
+        state: directory
+    - name: Twemproxy-Template Systemd 
+      template:
+        src: systemd_twemproxy-template.j2
+        dest: /lib/systemd/system/twemproxy-template.service
+        owner: root
+        group: root
+        mode: 0644
+    - name: Twemproxy Systemd 
+      template:
+        src: systemd_twemproxy.j2
+        dest: /lib/systemd/system/twemproxy.service
+        owner: root
+        group: root
+        mode: 0644
+    - name: copy the nutcracker template file
+      copy:
+        src: twemproxy.ctmpl
+        dest: /etc/nutcracker.ctmpl
+        owner: redis
+        group: redis
+        mode: '0644'
+    - name: create a temporary twemproxy config file
+      shell: "echo \"cpredis:\n  listen: 0.0.0.0:2112\n  servers:\n    - localhost:19901:1\" > /etc/nutcracker.yml"
+      args:
+        creates: /etc/nutcracker.yml
+    - name: Enable Twemproxy Service
+      systemd:
+        name: twemproxy.service
+        enabled: yes
+        state: started
+    - name: Enable Twemproxy-Template Service
+      systemd:
+        name: twemproxy-template.service
+        enabled: yes
+        state: started

+ 18 - 0
inputs.tf

@@ -1 +1,19 @@
 data "aws_caller_identity" "current" {}
+
+data "aws_ami" "re-ami" {
+  most_recent = true
+  name_regex  = "ubuntu-bionic-18.04-amd64-server"
+  # This is Canonical's ID
+  owners = ["099720109477"]
+
+  filter {
+    name   = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+}
+

+ 6 - 0
maguec1.tfvars

@@ -4,3 +4,9 @@ open-nets = ["76.14.80.208/32"]
 vpc-azs   = ["us-west-1a", "us-west-1c"]
 vpc-cidr  = "10.161.0.0/16"
 vpc-name  = "maguec1"
+common-tags     = {
+  "Owner"       = "maguec"
+  "Project"     = "cp-redis"
+  "Environment" = "test"
+}
+

+ 91 - 0
main.tf

@@ -22,14 +22,21 @@ module "nodes" {
   region           = var.region
   profile          = var.profile
   open-nets        = ["76.14.80.208/32"]
+  data-node-count  = 5
+  re-volume-size   = 250
+  re-instance-type = "m5.4xlarge"
   vpc-cidr         = var.vpc-cidr
   vpc-azs          = var.vpc-azs
   vpc-name         = var.vpc-name
   vpc-id           = module.vpc.vpc-id
   vpc-subnets      = module.vpc.subnets-public
+  allow-public-ssh = 1
   enable-flash     = true
   common-tags = {
     "Owner"   = "maguec"
@@ -37,6 +44,7 @@ module "nodes" {
   }
 }
 
 module "tmuxer" {
   source  = "../tfmodule-cloud-tmux"
   ssh-key = "${var.vpc-name}.pem"
@@ -57,4 +65,87 @@ module "dns-lb" {
     "Owner"   = "maguec"
     "Project" = "AWS Testing Environment"
   }
+module "tmux" {
+  source       = "../tfmodule-cloud-tmux"
+  session-name = "cpredis"
+  ssh-key      = "${var.vpc-name}.pem"
+  user         = "ubuntu"
+  nodes        = concat(module.nodes.node-ips, [aws_eip.re-eip.public_ip])
+  outpath      = "${path.module}"
 }
+
+
+# Start CP Redis Testernode
+
+resource "aws_instance" "re" {
+  ami                    = data.aws_ami.re-ami.id
+  instance_type          = "m5.4xlarge"
+  availability_zone      = "${element(var.vpc-azs, 1)}"
+  subnet_id              = "${element(module.vpc.subnets-public, 1)}"
+  vpc_security_group_ids = [module.nodes.re-security-group]
+  source_dest_check      = false
+  key_name               = var.vpc-name
+  tags                   = merge({ Name = "Tester-${var.vpc-name}-1" }, var.common-tags)
+
+}
+
+resource "aws_eip" "re-eip" {
+  vpc   = true
+  tags  = merge({ Name = "${var.vpc-name}-node-eip-1" }, var.common-tags)
+}
+
+resource "aws_eip_association" "re-eip-assoc" {
+  instance_id   = "${element(aws_instance.re.*.id, 1)}"
+  allocation_id = "${element(aws_eip.re-eip.*.id, 1)}"
+  depends_on    = ["aws_instance.re", "aws_eip.re-eip"]
+}
+
+
+resource "null_resource" "remote-config" {
+  provisioner "remote-exec" {
+    connection {
+      user        = "ubuntu"
+      host        = aws_eip.re-eip.public_ip
+      private_key = "${file("~/.ssh/${var.vpc-name}.pem")}"
+      agent       = true
+    }
+    inline = ["sudo apt update > /dev/null  && sudo apt install -y python python-pip > /dev/null"]
+  }
+  depends_on = ["aws_instance.re", "aws_eip_association.re-eip-assoc"]
+}
+
+
+output "node-ips" {
+  value = formatlist("ssh -i ~/.ssh/${var.vpc-name}.pem -o IdentitiesOnly=yes  -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s", module.nodes.node-ips)
+}
+
+output "tester-ips" {
+  value = formatlist("ssh -i ~/.ssh/${var.vpc-name}.pem -o IdentitiesOnly=yes  -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s", aws_eip.re-eip.public_ip)
+}
+
+output "tmux" {
+  value = "run ./cloud-tmux to enable tmux connections"
+}
+
+data "template_file" "raft_group_setup" {
+  template = "${file("${path.module}/raft_group_setup.tpl")}"
+  vars = {
+    node1 = module.nodes.node-internal-ips[0]
+    node2 = module.nodes.node-internal-ips[1]
+    node3 = module.nodes.node-internal-ips[2]
+    node4 = module.nodes.node-internal-ips[3]
+    node5 = module.nodes.node-internal-ips[4]
+  }
+}
+
+resource "null_resource" "raft_group_setup" {
+  provisioner "local-exec" {
+    command = "echo \"${data.template_file.raft_group_setup.rendered}\" > ${path.module}/ansible/raft_group_setup"
+  }
+  depends_on = ["data.template_file.raft_group_setup"]
+}
+
+
+

+ 92 - 0
raft_group_setup.tpl

@@ -0,0 +1,92 @@
+#!/bin/bash
+
+#############################################################################
+#  Setting up the raft groups
+#  
+#  IPS:
+#  ${node1}
+#  ${node2}
+#  ${node3}
+#  ${node4}
+#  ${node5}
+#  
+#  Run once from from any node
+#############################################################################
+
+# setup clusters
+redis-cli -h ${node1} -p 19901 raft.cluster init
+redis-cli -h ${node1} -p 19902 raft.cluster init
+redis-cli -h ${node1} -p 19903 raft.cluster init
+redis-cli -h ${node1} -p 19904 raft.cluster init
+
+redis-cli -h ${node2} -p 19905 raft.cluster init
+redis-cli -h ${node2} -p 19906 raft.cluster init
+redis-cli -h ${node2} -p 19907 raft.cluster init
+redis-cli -h ${node2} -p 19908 raft.cluster init
+
+redis-cli -h ${node3} -p 19909 raft.cluster init
+redis-cli -h ${node3} -p 19910 raft.cluster init
+redis-cli -h ${node3} -p 19911 raft.cluster init
+redis-cli -h ${node3} -p 19912 raft.cluster init
+
+# setup groups
+redis-cli -h ${node2} -p 19901 raft.cluster join ${node1}:19901
+redis-cli -h ${node3} -p 19901 raft.cluster join ${node1}:19901
+redis-cli -h ${node4} -p 19901 raft.cluster join ${node1}:19901
+redis-cli -h ${node5} -p 19901 raft.cluster join ${node1}:19901
+
+redis-cli -h ${node2} -p 19902 raft.cluster join ${node1}:19902
+redis-cli -h ${node3} -p 19902 raft.cluster join ${node1}:19902
+redis-cli -h ${node4} -p 19902 raft.cluster join ${node1}:19902
+redis-cli -h ${node5} -p 19902 raft.cluster join ${node1}:19902
+
+redis-cli -h ${node2} -p 19903 raft.cluster join ${node1}:19903
+redis-cli -h ${node3} -p 19903 raft.cluster join ${node1}:19903
+redis-cli -h ${node4} -p 19903 raft.cluster join ${node1}:19903
+redis-cli -h ${node5} -p 19903 raft.cluster join ${node1}:19903
+
+redis-cli -h ${node2} -p 19904 raft.cluster join ${node1}:19904
+redis-cli -h ${node3} -p 19904 raft.cluster join ${node1}:19904
+redis-cli -h ${node4} -p 19904 raft.cluster join ${node1}:19904
+redis-cli -h ${node5} -p 19904 raft.cluster join ${node1}:19904
+
+redis-cli -h ${node1} -p 19905 raft.cluster join ${node2}:19905
+redis-cli -h ${node3} -p 19905 raft.cluster join ${node2}:19905
+redis-cli -h ${node4} -p 19905 raft.cluster join ${node2}:19905
+redis-cli -h ${node5} -p 19905 raft.cluster join ${node2}:19905
+
+redis-cli -h ${node1} -p 19906 raft.cluster join ${node2}:19906
+redis-cli -h ${node3} -p 19906 raft.cluster join ${node2}:19906
+redis-cli -h ${node4} -p 19906 raft.cluster join ${node2}:19906
+redis-cli -h ${node5} -p 19906 raft.cluster join ${node2}:19906
+
+redis-cli -h ${node1} -p 19907 raft.cluster join ${node2}:19907
+redis-cli -h ${node3} -p 19907 raft.cluster join ${node2}:19907
+redis-cli -h ${node4} -p 19907 raft.cluster join ${node2}:19907
+redis-cli -h ${node5} -p 19907 raft.cluster join ${node2}:19907
+
+redis-cli -h ${node1} -p 19908 raft.cluster join ${node2}:19908
+redis-cli -h ${node3} -p 19908 raft.cluster join ${node2}:19908
+redis-cli -h ${node4} -p 19908 raft.cluster join ${node2}:19908
+redis-cli -h ${node5} -p 19908 raft.cluster join ${node2}:19908
+
+redis-cli -h ${node1} -p 19909 raft.cluster join ${node3}:19909
+redis-cli -h ${node2} -p 19909 raft.cluster join ${node3}:19909
+redis-cli -h ${node4} -p 19909 raft.cluster join ${node3}:19909
+redis-cli -h ${node5} -p 19909 raft.cluster join ${node3}:19909
+
+redis-cli -h ${node1} -p 19910 raft.cluster join ${node3}:19910
+redis-cli -h ${node2} -p 19910 raft.cluster join ${node3}:19910
+redis-cli -h ${node4} -p 19910 raft.cluster join ${node3}:19910
+redis-cli -h ${node5} -p 19910 raft.cluster join ${node3}:19910
+
+redis-cli -h ${node1} -p 19911 raft.cluster join ${node3}:19911
+redis-cli -h ${node2} -p 19911 raft.cluster join ${node3}:19911
+redis-cli -h ${node4} -p 19911 raft.cluster join ${node3}:19911
+redis-cli -h ${node5} -p 19911 raft.cluster join ${node3}:19911
+
+redis-cli -h ${node1} -p 19912 raft.cluster join ${node3}:19912
+redis-cli -h ${node2} -p 19912 raft.cluster join ${node3}:19912
+redis-cli -h ${node4} -p 19912 raft.cluster join ${node3}:19912
+redis-cli -h ${node5} -p 19912 raft.cluster join ${node3}:19912
+

+ 7 - 1
variables.tf

@@ -18,4 +18,10 @@ variable "vpc-azs" {
 variable "open-nets" {
   type        = list
   description = "Networks that will have full access"
-}
+}
+
+variable "common-tags" {
+  type        = map(string)
+  description = "Tags that go everywhere"
+}
+