63292bd13c1c5447991ce31781452eae5f89293b
[it/test.git] / XTesting / kubespray / Vagrantfile
1 # -*- mode: ruby -*-
2 # # vi: set ft=ruby :
3
4 # For help on using kubespray with vagrant, check out docs/vagrant.md
5
6 require 'fileutils'
7
8 Vagrant.require_version ">= 2.0.0"
9
# Optional user-supplied config file that overrides the defaults below;
# its location can be changed through the KUBESPRAY_VAGRANT_CONFIG env var.
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"

# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i

# Map of selectable $os values to their Vagrant box, SSH user and (for
# Flatcar) the box-catalog URL. The four Flatcar channels share one URL
# template and log in as "core"; every other entry is a plain box name
# using the stock "vagrant" user.
SUPPORTED_OS = {}

%w[stable beta alpha edge].each do |channel|
  SUPPORTED_OS["flatcar-#{channel}"] = {
    box: "flatcar-#{channel}",
    user: "core",
    box_url: format(FLATCAR_URL_TEMPLATE, channel),
  }
end

{
  "ubuntu1604"          => "generic/ubuntu1604",
  "ubuntu1804"          => "generic/ubuntu1804",
  "ubuntu2004"          => "generic/ubuntu2004",
  "centos"              => "centos/7",
  "centos-bento"        => "bento/centos-7.6",
  "centos8"             => "centos/8",
  "centos8-bento"       => "bento/centos-8",
  "almalinux8"          => "almalinux/8",
  "almalinux8-bento"    => "bento/almalinux-8",
  "rockylinux8"         => "generic/rocky8",
  "fedora35"            => "fedora/35-cloud-base",
  "fedora36"            => "fedora/36-cloud-base",
  "opensuse"            => "opensuse/Leap-15.4.x86_64",
  "opensuse-tumbleweed" => "opensuse/Tumbleweed.x86_64",
  "oraclelinux"         => "generic/oracle7",
  "oraclelinux8"        => "generic/oracle8",
  "rhel7"               => "generic/rhel7",
  "rhel8"               => "generic/rhel8",
}.each do |name, box|
  SUPPORTED_OS[name] = { box: box, user: "vagrant" }
end
41
# Load the optional user config (see CONFIG above) so it can pre-assign
# any of the $-globals defaulted below before `||=` fills in the rest.
if File.exist?(CONFIG)
  require CONFIG
end
45
# Defaults for every option that CONFIG may override; `||=` keeps any
# value the user config has already assigned.

# Cluster sizing
$num_instances ||= 3
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? 1 : $num_instances - 1
# All nodes are kube nodes
$kube_node_instances ||= $num_instances

# VM shape and naming
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}

# Networking
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"

# Guest OS selection and download-cache behaviour
$os ||= "ubuntu1804"
$download_run_once ||= "True"
$download_force_cache ||= "False"

# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2

# Root-disk resizing (needs the vagrant-disksize plugin)
$override_disk_size ||= false
$disk_size ||= "20GB"

$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false

# Ansible invocation knobs; verbosity is a boolean or a flag string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$playbook ||= "cluster.yml"
82
# Per-VM Ansible variables, filled in inside the node loop below.
host_vars = {}

# Box name comes straight from the SUPPORTED_OS table.
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory ||= "inventory/sample"
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

# if $inventory has a hosts.ini file use it; otherwise symlink the
# inventory into the place where vagrant expects its dynamic inventory,
# so group_vars etc. are still picked up.
unless File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) unless File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible, "inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
end
99
# When vagrant-proxyconf is installed, pre-compute the proxy exclusion
# list: keep whatever the environment already excludes and append the
# private IP of every node (.101, .102, ...).
if Vagrant.has_plugin?("vagrant-proxyconf")
  exclusions = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  node_addresses = (1..$num_instances).map { |i| "#{$subnet}.#{i + 100}" }
  $no_proxy = ([exclusions] + node_addresses).join(",")
end
106
# Main machine definition: one VM per instance, private network on
# $subnet, rsync'd source tree, and a single Ansible run triggered from
# the last machine once all nodes are up.
Vagrant.configure("2") do |config|

  config.vm.box = $box
  if SUPPORTED_OS[$os].has_key? :box_url
    config.vm.box_url = SUPPORTED_OS[$os][:box_url]
  end
  config.ssh.username = SUPPORTED_OS[$os][:user]

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  # always use Vagrants insecure key
  config.ssh.insert_key = false

  if ($override_disk_size)
    unless Vagrant.has_plugin?("vagrant-disksize")
      system "vagrant plugin install vagrant-disksize"
    end
    config.disksize.size = $disk_size
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|

      node.vm.hostname = vm_name

      # Pass host proxy settings through to each guest.
      if Vagrant.has_plugin?("vagrant-proxyconf")
        node.proxy.http     = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
        node.proxy.https    = ENV['HTTPS_PROXY'] || ENV['https_proxy'] ||  ""
        node.proxy.no_proxy = $no_proxy
      end

      ["vmware_fusion", "vmware_workstation"].each do |vmware|
        node.vm.provider vmware do |v|
          v.vmx['memsize'] = $vm_memory
          v.vmx['numvcpus'] = $vm_cpus
        end
      end

      node.vm.provider :virtualbox do |vb|
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
        vb.gui = $vm_gui
        vb.linked_clone = true
        vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
        vb.customize ["modifyvm", :id, "--audio", "none"]
      end

      node.vm.provider :libvirt do |lv|
        lv.nested = $libvirt_nested
        lv.cpu_mode = "host-model"
        lv.memory = $vm_memory
        lv.cpus = $vm_cpus
        lv.default_prefix = 'kubespray'
        # Fix kernel panic on fedora 28
        # BUGFIX: was `$os == "fedora"`, which could never match because
        # SUPPORTED_OS only defines versioned keys ("fedora35", "fedora36");
        # match with a regexp so the workaround actually applies.
        if $os =~ /fedora/
          lv.cpu_mode = "host-passthrough"
        end
      end

      if $kube_node_instances_with_disks
        # Libvirt
        driverletters = ('a'..'z').to_a
        node.vm.provider :libvirt do |lv|
          # always make /dev/sd{a/b/c} so that CI can ensure that
          # virtualbox and libvirt will have the same devices to use for OSDs
          (1..$kube_node_instances_with_disks_number).each do |d|
            lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
          end
        end
      end

      # Optionally expose the Docker daemon TCP port on the host, one
      # host port per node starting at $expose_docker_tcp.
      if $expose_docker_tcp
        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
      end

      if ["rhel7","rhel8"].include? $os
        # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
        # be installed until the host is registered with a valid Red Hat support subscription
        node.vm.synced_folder ".", "/vagrant", disabled: false
        $shared_folders.each do |src, dst|
          node.vm.synced_folder src, dst
        end
      else
        node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
        $shared_folders.each do |src, dst|
          node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
        end
      end

      # Nodes get consecutive IPs starting at .101.
      ip = "#{$subnet}.#{i+100}"
      node.vm.network :private_network, ip: ip,
        :libvirt__guest_ipv6 => 'yes',
        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
        :libvirt__ipv6_prefix => "64",
        :libvirt__forward_mode => "none",
        :libvirt__dhcp_enabled => false

      # Disable swap for each vm
      node.vm.provision "shell", inline: "swapoff -a"

      # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
      if ["ubuntu1804", "ubuntu2004"].include? $os
        node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
        node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
      end

      # Disable firewalld on oraclelinux/redhat vms
      if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
        node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
      end

      # Per-host Ansible variables handed to the provisioner below.
      host_vars[vm_name] = {
        "ip": ip,
        "flannel_interface": "eth1",
        "kube_network_plugin": $network_plugin,
        "kube_network_plugin_multus": $multi_networking,
        "download_run_once": $download_run_once,
        "download_localhost": "False",
        "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
        # Make kubespray cache even when download_run_once is false
        "download_force_cache": $download_force_cache,
        # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
        "download_keep_remote_cache": "False",
        "docker_rpm_keepcache": "1",
        # These two settings will put kubectl and admin.config in $inventory/artifacts
        "kubeconfig_localhost": "True",
        "kubectl_localhost": "True",
        "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
        "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
        "ansible_ssh_user": SUPPORTED_OS[$os][:user]
      }

      # Only execute the Ansible provisioner once, when all the machines are up and ready.
      # And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
      if i == $num_instances
        node.vm.provision "ansible" do |ansible|
          ansible.playbook = $playbook
          ansible.verbose = $ansible_verbosity
          $ansible_inventory_path = File.join( $inventory, "hosts.ini")
          if File.exist?($ansible_inventory_path)
            ansible.inventory_path = $ansible_inventory_path
          end
          ansible.become = true
          ansible.limit = "all,localhost"
          ansible.host_key_checking = false
          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
          ansible.host_vars = host_vars
          if $ansible_tags != ""
            ansible.tags = [$ansible_tags]
          end
          # Groups use Ansible's inventory range syntax, e.g. "k8s-[1:3]".
          ansible.groups = {
            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
            "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
            "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
            "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
          }
        end
      end

    end
  end
end