# -*- mode: ruby -*-
# vi: set ft=ruby :
# Set up an Ansible Controller host with DebOps support using Vagrant
#
# Copyright (C) 2017 Maciej Delmanowski <[email protected]>
# Copyright (C) 2017 DebOps project https://debops.org/
# Basic usage:
#
# vagrant up && vagrant ssh
# cd src/controller ; debops
# Configuration variables:
#
# VAGRANT_BOX="debian/stretch64"
# Specify the box to use.
#
# VAGRANT_HOSTNAME="stretch"
# Set a custom hostname after the box boots up.
#
# CONTROLLER=false
# Set to 'true' to configure normal-sized Diffie-Hellman parameters
# (3072 and 2048 bits) instead of smaller ones (1024 bits). Initial DH
# parameter generation may take a long time.
#
# APT_HTTP_PROXY="" (http://apt.example.org:3142)
# Set a custom APT cache URL inside the Vagrant box.
#
# APT_FORCE_NETWORK="4" / APT_FORCE_NETWORK="6"
# Configure APT to connect only over IPv4 or only over IPv6. This might
# be required when connectivity on one of the networks is spotty or broken.
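#
# Example invocation (illustrative values, adjust to your environment):
#
#   VAGRANT_BOX="debian/stretch64" APT_HTTP_PROXY="http://apt.example.org:3142" \
#   APT_FORCE_NETWORK="4" vagrant up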
$fix_hostname_dns = <<SCRIPT
set -o nounset -o pipefail -o errexit
current_fqdn="$(hostname --fqdn)"
current_hostname="$(hostname)"
current_default_dev="$(ip route | grep -E '^default via' | awk '{print $5}')"
# In the 'ip route' table, find all of the lines that describe the default
# route based on the device, find the 'src' field and print out the next
# field, which contains the IP address of the host.
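# For example (an illustrative entry, values will differ per box), the line
# '192.168.121.0/24 dev eth0 proto kernel scope link src 192.168.121.15'
# yields '192.168.121.15'.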
current_default_ip="$(ip route \
| grep "dev "${current_default_dev}"" \
| grep -v -E '^default via' \
| grep src \
| awk '{for (I=1;I<=NF;I++) if ($I == "src") {print $(I+1)};}' \
| uniq)"
# Fix for https://github.com/hashicorp/vagrant/issues/7263
if grep "127.0.0.1" /etc/hosts | grep "${current_fqdn}" > /dev/null ; then
printf "Updating the box IP address to '%s' in /etc/hosts...\n" "${current_default_ip}"
sed -i -e "/^127\.0\.0\.1.*$(hostname -f | sed -e 's/\./\\\./g')/d" /etc/hosts
# The upstream Vagrant box image contains 'stretch' as an alias of
# 'localhost'; remove it to avoid potential issues.
sed -i -r -e 's/^127\.0\.0\.1\\s+localhost.*$/127.0.0.1\\tlocalhost/' /etc/hosts
# This provisioning script is executed on all nodes in the cluster;
# the "master" node does not have a suffix to extract.
if printf "${current_hostname}\n" | grep -E '^.*\-.*\-node[0-9]{1,3}$' ; then
node_short="$(printf "${current_hostname}" | awk -F'-' '{print $3}')"
else
node_short="master"
fi
# Add an '/etc/hosts' entry for the current host. The rest of the cluster
# will be defined later by the master node.
printf "%s\t%s %s %s\n" "${current_default_ip:-127.0.1.1}" "${current_fqdn}" \
"${current_hostname}" "${node_short}" >> /etc/hosts
fi
# Install Avahi and configure a custom service to help the master host detect
# other nodes in the cluster. Avahi might be blocked later by the firewall, but
# that is expected; the service is not used for anything in particular beyond
# initial cluster provisioning.
#
mkdir -p "/etc/systemd/system/avahi-daemon.service.d"
cat <<EOF >> "/etc/systemd/system/avahi-daemon.service.d/rlimits-override.conf"
# Override installed by DebOps Vagrantfile
#
# Avoid issues with low nproc limits on LXC hosts with unprivileged LXC
# containers sharing host UIDs/GIDs
# Ref: https://github.com/lxc/lxd/issues/2948
# Ref: https://loune.net/2011/02/avahi-setrlimit-nproc-and-lxc/
[Service]
ExecStart=
ExecStart=/usr/sbin/avahi-daemon -s --no-rlimits
EOF
systemctl daemon-reload
if ! type avahi-daemon > /dev/null 2>&1 ; then
apt-get -q update
apt-get -qy install avahi-daemon avahi-utils libnss-mdns
fi
cluster_prefix="$(hostname | sed -e 's/-node.*$//')"
if ! [ -f "/etc/avahi/services/debops-cluster.service" ] ; then
cat <<EOF > "/etc/avahi/services/debops-cluster.service"
<?xml version="1.0" standalone='no'?><!--*-nxml-*-->
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
<name replace-wildcards="yes">%h</name>
<service>
<type>_${cluster_prefix}-cluster._tcp</type>
<port>22</port>
</service>
</service-group>
EOF
fi
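# The master node later discovers these records via 'avahi-browse' (see the
# $provision_controller script below) to build its Ansible inventory and
# '/etc/hosts' entries.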
# When an external DHCP server provides networking, its DNS may contain
# a record for the initial hostname of the Vagrant box, sent by default by the
# DHCP client. To avoid name resolution issues, release the current DHCP lease
# and obtain it again, with the new hostname. Hopefully, the DHCP server is
# configured to keep the lease for the same IP for a short time; otherwise
# Vagrant might lose track of the box network configuration.
printf "%s\n" "Restarting network services to get the correct hostname in the DHCP lease..."
if [ -d /run/systemd/system ] ; then
if [ "$(systemctl is-active systemd-networkd.service)" == "active" ] ; then
printf "%s\n" "Restarting systemd-networkd.service"
systemctl restart systemd-networkd.service
else
printf "%s\n" "Restarting networking.service"
systemctl restart networking.service
fi
else
printf "%s\n" "Restarting networking init script"
/etc/init.d/networking restart
fi
SCRIPT
$provision_box = <<SCRIPT
set -o nounset -o pipefail -o errexit
export JANE_VAGRANT_INCEPTION="true"
readonly PROVISION_CI="#{ENV['CI']}"
readonly PROVISION_GITLAB_CI="#{ENV['GITLAB_CI']}"
readonly PROVISION_VAGRANT_HOSTNAME="#{ENV['VAGRANT_HOSTNAME']}"
readonly PROVISION_APT_HTTP_PROXY="#{ENV['APT_HTTP_PROXY']}"
readonly PROVISION_APT_HTTPS_PROXY="#{ENV['APT_HTTPS_PROXY']}"
readonly PROVISION_APT_FORCE_NETWORK="#{ENV['APT_FORCE_NETWORK']}"
readonly PROVISION_ANSIBLE_FROM="#{ENV['ANSIBLE_FROM'] || 'debian'}"
readonly VAGRANT_PREPARE_BOX="#{ENV['VAGRANT_PREPARE_BOX']}"
# Install the Jane script
if ! type jane > /dev/null 2>&1 ; then
export JANE_BOX_INIT="true"
if [ -e "/vagrant/lib/tests/jane" ] ; then
cp /vagrant/lib/tests/jane /usr/local/bin/jane && chmod +x /usr/local/bin/jane
jane notify info "Jane installed"
else
tee "/usr/local/bin/jane" > "/dev/null" <<EOF
#!/bin/sh
# Fake stub Jane script
printf "%s\\n" "JANE: \\${\*}"
EOF
chmod +x /usr/local/bin/jane
fi
else
if [ -e "/vagrant/lib/tests/jane" ] ; then
if ! cmp "/vagrant/lib/tests/jane" "/usr/local/bin/jane" ; then
cp /vagrant/lib/tests/jane /usr/local/bin/jane && chmod +x /usr/local/bin/jane
jane notify info "Jane updated"
else
jane notify ok "Jane up to date"
fi
else
jane notify ok "Jane found at '$(which jane)'"
fi
fi
# Disable automated APT operations as soon as possible. This is suitable only
# for test environments and helps avoid errors due to a locked APT/dpkg database.
# https://unix.stackexchange.com/questions/315502/
if [ -n "${PROVISION_CI}" ] ; then
jane notify info "Stopping apt-daily.service..."
systemctl stop apt-daily.service || true
systemctl kill --kill-who=all apt-daily.service || true
systemctl stop apt-daily.timer || true
next_wait_time=0
until systemctl list-units --all apt-daily.service | fgrep -q dead || [ $next_wait_time -eq 2 ] ; do
jane notify info "Waiting for apt-daily.service to go down..."
sleep $(( next_wait_time++ ))
done
jane notify info "Stopping apt-daily-upgrade.service..."
systemctl stop apt-daily-upgrade.service || true
systemctl kill --kill-who=all apt-daily-upgrade.service || true
systemctl stop apt-daily-upgrade.timer || true
next_wait_time=1
until systemctl list-units --all apt-daily-upgrade.service | fgrep -q dead || [ $next_wait_time -eq 4 ] ; do
jane notify info "Waiting for apt-daily-upgrade.service to go down..."
sleep $(( next_wait_time++ ))
done
rm -rf /var/lib/systemd/timers/*.timer
fi
# Configure APT proxy
if [ -n "${PROVISION_APT_HTTP_PROXY}" ] ; then
tee "/etc/apt/apt.conf.d/00aptproxy" > "/dev/null" <<EOF
Acquire::http::Proxy "${PROVISION_APT_HTTP_PROXY}";
Acquire::https::Proxy "false";
EOF
if [ -n "${PROVISION_APT_HTTPS_PROXY}" ] ; then
tee -a "/etc/apt/apt.conf.d/00aptproxy" > "/dev/null" <<EOF
Acquire::https::Proxy "${PROVISION_APT_HTTPS_PROXY}";
EOF
else
tee -a "/etc/apt/apt.conf.d/00aptproxy" > "/dev/null" <<EOF
Acquire::https::Proxy "DIRECT";
EOF
fi
fi
# Force IPv4/IPv6 connections in APT depending on the test network requirements
# https://bugs.debian.org/611891
if [ -n "${PROVISION_APT_FORCE_NETWORK}" ] ; then
if [ "${PROVISION_APT_FORCE_NETWORK}" = "4" ] ; then
jane notify info "Forcing APT to only use IPv4 network"
tee -a "/etc/apt/apt.conf.d/00force-ip-network" > "/dev/null" <<EOF
Acquire::ForceIPv4 "true";
EOF
elif [ "${PROVISION_APT_FORCE_NETWORK}" = "6" ] ; then
jane notify info "Forcing APT to only use IPv6 network"
tee -a "/etc/apt/apt.conf.d/00force-ip-network" > "/dev/null" <<EOF
Acquire::ForceIPv6 "true";
EOF
fi
fi
# Configure GitLab CI environment
if [ -n "${PROVISION_GITLAB_CI}" ] && [ "${PROVISION_GITLAB_CI}" == "true" ] ; then
tee "/etc/profile.d/vagrant_vars.sh" > "/dev/null" <<EOF
export JANE_INCEPTION="true"
export CI="#{ENV['CI']}"
export GITLAB_CI="#{ENV['GITLAB_CI']}"
export CI_JOB_ID="#{ENV['CI_JOB_ID']}"
export CI_JOB_NAME="#{ENV['CI_JOB_NAME']}"
export CI_JOB_STAGE="#{ENV['CI_JOB_STAGE']}"
export JANE_TEST_PLAY="#{ENV['JANE_TEST_PLAY']}"
export JANE_FORCE_TESTS="#{ENV['JANE_FORCE_TESTS']}"
export JANE_INVENTORY_DIRS="#{ENV['JANE_INVENTORY_DIRS']}"
export JANE_INVENTORY_GROUPS="#{ENV['JANE_INVENTORY_GROUPS']}"
export JANE_INVENTORY_HOSTVARS="#{ENV['JANE_INVENTORY_HOSTVARS']}"
export JANE_KEEP_BOX="#{ENV['JANE_KEEP_BOX']}"
export VAGRANT_BOX="#{ENV['VAGRANT_BOX']}"
export VAGRANT_DOTFILE_PATH="#{ENV['VAGRANT_DOTFILE_PATH']}"
export TERM="#{ENV['TERM']}"
EOF
fi
provision_apt_http_proxy=""
provision_apt_https_proxy=""
eval $(apt-config shell provision_apt_http_proxy Acquire::http::Proxy)
eval $(apt-config shell provision_apt_https_proxy Acquire::https::Proxy)
if [ -n "${provision_apt_http_proxy}" ] ; then
jane notify config "APT HTTP proxy is enabled: '${provision_apt_http_proxy}'"
fi
if [ -n "${provision_apt_https_proxy}" ] ; then
if [ "${provision_apt_https_proxy}" == "DIRECT" ] ; then
jane notify config "APT HTTPS proxy is disabled"
else
jane notify config "APT HTTPS proxy is enabled: '${provision_apt_https_proxy}'"
fi
fi
ansible_from_debian=""
ansible_from_pypi=""
ansible_from_devel=""
if [ "${PROVISION_ANSIBLE_FROM}" == "debian" ] ; then
ansible_from_debian="ansible"
elif [ "${PROVISION_ANSIBLE_FROM}" == "pypi" ] ; then
ansible_from_pypi="ansible"
else
ansible_from_devel="${PROVISION_ANSIBLE_FROM}"
fi
# Configure Ansible
if ! type ansible > /dev/null 2>&1 ; then
jane notify warning "Ansible not found"
tee "/etc/apt/sources.list" > "/dev/null" <<EOF
deb http://deb.debian.org/debian stretch main
deb http://deb.debian.org/debian stretch-updates main
deb http://deb.debian.org/debian stretch-backports main
deb http://security.debian.org/ stretch/updates main
EOF
tee "/etc/apt/preferences.d/provision_ansible.pref" > "/dev/null" <<EOF
Package: ansible
Pin: release a=stretch-backports
Pin-Priority: 500
EOF
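# With this pin in place, 'apt-cache policy ansible' should report the
# stretch-backports package as the installation candidate.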
jane notify cache "Refreshing APT cache"
apt-get update
if [ -n "${ansible_from_devel}" ] ; then
jane notify install "Installing Ansible from GitHub..."
/vagrant/ansible/roles/debops.ansible/files/script/bootstrap-ansible "${ansible_from_devel}"
fi
jane notify install "Installing Ansible requirements via APT..."
DEBIAN_FRONTEND=noninteractive apt-get -y \
--no-install-recommends install \
acl \
apt-transport-https \
encfs \
git \
haveged \
jo \
jq \
make \
python-apt \
python-dnspython \
python-future \
python-jinja2 \
python-ldap \
python-nose2 \
python-nose2-cov \
python-openssl \
python-passlib \
python-pip \
python-pycodestyle \
python-pytest \
python-pytest-cov \
python-setuptools \
python-sphinx \
python-sphinx-rtd-theme \
python-unittest2 \
python-wheel \
python-yaml \
rsync \
shellcheck \
yamllint ${ansible_from_debian}
jane notify cache "Cleaning up cache directories..."
find /var/lib/apt/lists -maxdepth 1 -type f ! -name 'lock' -delete
find /var/cache/apt/archives -maxdepth 1 -name '*.deb' -delete
rm -rf /root/.cache/* /tmp/*
else
jane notify ok "Ansible found at '$(which ansible)'"
ansible --version
fi
# Update APT cache on the first boot after provisioning so that APT packages
# can be installed correctly right away.
if [ -z "${JANE_BOX_INIT:-}" ] ; then
jane notify cache "Refreshing APT cache"
apt-get update
# vagrant-libvirt executes virt-sysprep during box packaging.
# virt-sysprep zeroes out files in /usr/local/*, apparently.
# So we need to install PyPI packages on the real box, not the template.
jane notify install "Installing test requirements via PyPI..."
pip install netaddr python-ldap dnspython passlib future testinfra ${ansible_from_pypi}
mkdir /tmp/build
rsync -a --exclude '.vagrant' /vagrant/ /tmp/build
cd /tmp/build
make sdist > /dev/null
pip install dist/*
cd - > /dev/null
jane notify cache "Cleaning up cache directories..."
rm -rf /root/.cache/* /tmp/*
fi
if [ -n "${VAGRANT_PREPARE_BOX}" ] ; then
jane notify info "Removing host entry from '/etc/hosts' for CI environment"
sed -i -e "/$(hostname --fqdn)/d" /etc/hosts
fi
jane notify success "Vagrant box provisioning complete"
SCRIPT
$provision_node_box = <<SCRIPT
set -o nounset -o pipefail -o errexit
JANE_VAGRANT_INCEPTION="true"
PROVISION_GITLAB_CI="#{ENV['GITLAB_CI']}"
PROVISION_APT_HTTP_PROXY="#{ENV['APT_HTTP_PROXY']}"
PROVISION_APT_HTTPS_PROXY="#{ENV['APT_HTTPS_PROXY']}"
PROVISION_APT_FORCE_NETWORK="#{ENV['APT_FORCE_NETWORK']}"
# Configure GitLab CI environment
if [ -n "${PROVISION_GITLAB_CI}" ] && [ "${PROVISION_GITLAB_CI}" == "true" ] ; then
tee "/etc/profile.d/vagrant_vars.sh" > "/dev/null" <<EOF
export JANE_INCEPTION="true"
export CI="#{ENV['CI']}"
export GITLAB_CI="#{ENV['GITLAB_CI']}"
export CI_JOB_ID="#{ENV['CI_JOB_ID']}"
export TERM="#{ENV['TERM']}"
EOF
fi
# Install the CI supervisor script
if ! type jane > /dev/null 2>&1 ; then
if [ -e "/vagrant/lib/tests/jane" ] ; then
cp /vagrant/lib/tests/jane /usr/local/bin/jane && chmod +x /usr/local/bin/jane
jane notify info "Jane installed"
else
tee "/usr/local/bin/jane" > "/dev/null" <<EOF
#!/bin/sh
# Fake Jane script
printf "%s\\n" "JANE: \\${*}"
EOF
chmod +x /usr/local/bin/jane
fi
else
jane notify ok "Jane found at '$(which jane)'"
jane notify info "Refreshing APT sources"
apt-get -qq update
fi
# Configure APT cache
if [ -n "${PROVISION_APT_HTTP_PROXY}" ] ; then
jane notify info "Configuring APT cache at '${PROVISION_APT_HTTP_PROXY}'"
cat "/etc/apt/apt.conf.d/00aptproxy" <<EOF
Acquire::http::Proxy "${PROVISION_APT_HTTP_PROXY}";
EOF
fi
# Force IPv4/IPv6 connections in APT depending on the test network requirements
# https://bugs.debian.org/611891
if [ -n "${PROVISION_APT_FORCE_NETWORK}" ] ; then
if [ "${PROVISION_APT_FORCE_NETWORK}" = "4" ] ; then
jane notify info "Forcing APT to only use IPv4 network"
cat "/etc/apt/apt.conf.d/00force-ip-network" <<EOF
Acquire::ForceIPv4 "true";
EOF
elif [ "${PROVISION_APT_FORCE_NETWORK}" = "6" ] ; then
jane notify info "Forcing APT to only use IPv6 network"
cat "/etc/apt/apt.conf.d/00force-ip-network" <<EOF
Acquire::ForceIPv6 "true";
EOF
fi
fi
jane notify info "Vagrant node provisioning complete"
SCRIPT
$provision_controller = <<SCRIPT
set -o nounset -o pipefail -o errexit
readonly PROVISION_ANSIBLE_FROM="#{ENV['ANSIBLE_FROM'] || 'debian'}"
jane notify info "Configuring Ansible Controller host..."
# Ensure that the Ansible Controller host has an up-to-date APT cache so that
# packages can be installed without friction.
sudo apt-get -q update
ansible_from_pypi=""
if [ "${PROVISION_ANSIBLE_FROM}" == "pypi" ] ; then
ansible_from_pypi="ansible"
fi
jane notify install "Installing test requirements via PyPI..."
sudo pip install netaddr python-ldap dnspython passlib future testinfra ${ansible_from_pypi}
mkdir /tmp/build
rsync -a --exclude '.vagrant' /vagrant/ /tmp/build
cd /tmp/build
make sdist > /dev/null
sudo pip install dist/*
cd - > /dev/null
jane notify cache "Cleaning up cache directories..."
sudo rm -rf /root/.cache/* /tmp/*
if ! [ -e .local/share/debops/debops ] ; then
mkdir -p src .local/share/debops
if [ -d "/vagrant" ] ; then
jane notify info "Symlinking '/vagrant' to '~vagrant/.local/share/debops/debops'"
ln -s /vagrant .local/share/debops/debops
else
jane notify info "Installing DebOps monorepo to '~vagrant/.local/share/debops/debops'"
debops-update
fi
fi
if ! [ -d src/controller ] ; then
debops-init src/controller
sed -i '/ansible_connection=local$/ s/^#//' src/controller/ansible/inventory/hosts
vagrant_controller="$(printf "${SSH_CLIENT}\\n" | awk '{print $1}')"
mkdir -p "src/controller/ansible/inventory/group_vars/all"
mkdir -p "src/controller/ansible/inventory/host_vars/$(hostname)"
cat <<EOF >> "src/controller/ansible/inventory/group_vars/all/dhparam.yml"
---
# Use smaller Diffie-Hellman parameters to speed up test runs
dhparam__bits: [ '1024' ]
EOF
cat <<EOF >> "src/controller/ansible/inventory/group_vars/all/core.yml"
---
# Vagrant client detected from \\$SSH_CLIENT variable
core__ansible_controllers: [ '${vagrant_controller}' ]
EOF
# Provide an 'alias' for the master host in the Ansible inventory, for
# convenience and parity with the alias in the '/etc/hosts' database.
cat <<EOF >> "src/controller/ansible/inventory/master"
# DebOps master node
[master]
$(hostname)
EOF
# Create the 'nodes' Ansible inventory group for all remote cluster nodes.
cat <<EOF >> "src/controller/ansible/inventory/nodes"
# All DebOps test nodes in the inventory
[nodes]
EOF
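# Discover cluster members advertised over Avahi. With '-p', avahi-browse
# prints semicolon-separated records; field 4 holds the service name, which
# the '%h' wildcard in the service definition expands to the advertising
# host's name.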
cluster_nodes=( $(avahi-browse _$(hostname)-cluster._tcp -pt \
| awk -F';' '{print $4}' | sort | uniq | xargs) )
for node in ${cluster_nodes[@]} ; do
if printf "${node}\\n" | grep -E '^.*\-.*\-node[0-9]{1,3}$' > /dev/null ; then
node_short="$(printf "${node}\\n" | awk -F'-' '{print $3}')"
node_pad=" "
else
node_short=""
node_pad=""
fi
if ! grep "${node}.$(dnsdomainname)" /etc/hosts > /dev/null ; then
jane notify info "Creating ${node}.$(dnsdomainname) host record"
printf "%s\t%s %s%s%s\n" "$(getent hosts ${node}.local | awk '{print $1'})" \
"${node}.$(dnsdomainname)" "${node}" "${node_pad}" "${node_short}" \
| sudo tee --append /etc/hosts
jane notify info "Adding ${node}.$(dnsdomainname) to Ansible inventory"
printf "%s ansible_host=%s\n" "${node}" "${node}.$(dnsdomainname)" \
>> src/controller/ansible/inventory/hosts
# Scan the SSH host fingerprints of the detected nodes based on
# their DNS records. This is done for Ansible usage as well as to
# allow creation of the '/etc/hosts' records on the remote nodes.
ssh-keyscan -H "${node}.$(dnsdomainname)" >> ~/.ssh/known_hosts 2>/dev/null
ssh-keyscan -H "${node_short}" >> ~/.ssh/known_hosts 2>/dev/null
ssh-keyscan -H "${node}" >> ~/.ssh/known_hosts 2>/dev/null
# Add the detected node to the 'nodes' Ansible inventory group.
printf "%s\n" "${node}" >> "src/controller/ansible/inventory/nodes"
if [ -n "${node_short}" ] ; then
# Create an 'alias' in the Ansible inventory for a given node,
# for convenience and parity with the alias in the '/etc/hosts'
# database.
cat <<EOF >> "src/controller/ansible/inventory/${node_short}"
[${node_short}]
${node}
EOF
fi
# Connect to each detected node and use Avahi to discover other
# nodes in the cluster and create host entries in the '/etc/hosts'
# database on the remote nodes.
# This does not work during initial '/etc/hosts' configuration and
# has to be done from the master node.
ssh "${node}.$(dnsdomainname)" <<'SSHEND' > /dev/null 2>&1
cluster_nodes=( $(avahi-browse _$(hostname | sed -e 's/\\-node[0-9]\\{1,3\\}$//')-cluster._tcp -pt \
| awk -F';' '{print $4}' | sort | uniq | xargs) )
for node in ${cluster_nodes[@]} ; do
if printf "${node}\n" | grep -E '^.*\-.*\\-node[0-9]{1,3}$' ; then
node_short="$(printf "${node}\\n" | awk -F'-' '{print $3}')"
else
node_short="master"
fi
if ! grep "${node}.$(dnsdomainname)" /etc/hosts > /dev/null ; then
printf "Creating %s host record\\n" "${node}.$(dnsdomainname)"
printf "%s\\t%s %s %s\\n" "$(getent hosts ${node}.local | awk '{print $1'})" \
"${node}.$(dnsdomainname)" "${node}" "${node_short}" \
| sudo tee --append /etc/hosts > /dev/null
fi
done
SSHEND
fi
done
fi
jane notify info "Ansible Controller provisioning complete"
SCRIPT
require 'securerandom'
VAGRANT_DOMAIN = ENV['VAGRANT_DOMAIN'] || 'vagrant.test'
VAGRANT_HOSTNAME_MASTER = (ENV['VAGRANT_DOTFILE_PATH'] || '.vagrant') + '/vagrant_hostname_master'
if File.exist? VAGRANT_HOSTNAME_MASTER
master_hostname = IO.read( VAGRANT_HOSTNAME_MASTER ).strip
else
master_hostname = "debops-#{SecureRandom.hex(3)}"
IO.write( VAGRANT_HOSTNAME_MASTER, master_hostname )
end
master_fqdn = master_hostname + '.' + VAGRANT_DOMAIN
# Persist the number of additional nodes in the DebOps cluster to allow
# 'vagrant' commands without the VAGRANT_NODES variable being set in the
# environment.
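# For example, 'VAGRANT_NODES=2 vagrant up' brings up the 'master' box plus
# 'node1' and 'node2'; later plain 'vagrant' commands reuse the stored count.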
VAGRANT_NODE_NUMBER = (ENV['VAGRANT_DOTFILE_PATH'] || '.vagrant') + '/vagrant_node_number'
if File.exist? VAGRANT_NODE_NUMBER
VAGRANT_NODES = ENV['VAGRANT_NODES'] || IO.read( VAGRANT_NODE_NUMBER ).strip
else
VAGRANT_NODES = ENV['VAGRANT_NODES'] || 0
end
IO.write( VAGRANT_NODE_NUMBER, VAGRANT_NODES )
VAGRANT_NODE_BOX = ENV['VAGRANT_NODE_BOX'] || 'debian/stretch64'
# Vagrant removed the atlas.hashicorp.com to vagrantcloud.com
# redirect. The value of DEFAULT_SERVER_URL in Vagrant versions
# less than 1.9.3 is atlas.hashicorp.com. This breaks the fetching
# and updating of boxes as they are now stored in
# vagrantcloud.com instead of atlas.hashicorp.com.
# https://github.com/hashicorp/vagrant/issues/9442
if Vagrant::DEFAULT_SERVER_URL.include?('atlas.hashicorp.com')
Vagrant::DEFAULT_SERVER_URL.replace('https://vagrantcloud.com')
end
Vagrant.configure("2") do |config|
# Create and provision additional nodes first, so that the master node has
# time to do provisioning and cluster detection using Avahi later.
if VAGRANT_NODES != 0
(1..VAGRANT_NODES.to_i).each do |i|
node_fqdn = master_hostname + "-node#{i}." + VAGRANT_DOMAIN
config.vm.define "node#{i}", autostart: true do |node|
node.vm.box = VAGRANT_NODE_BOX
node.vm.hostname = node_fqdn
node.vm.provision "shell", inline: $fix_hostname_dns, keep_color: true
node.vm.provision "shell", inline: $provision_node_box, keep_color: true, run: "always"
# Don't populate '/vagrant' directory on other nodes
node.vm.synced_folder ".", "/vagrant", disabled: true
node.vm.provider "libvirt" do |libvirt, override|
libvirt.random_hostname = true
libvirt.memory = ENV['VAGRANT_NODE_MEMORY'] || '512'
libvirt.cpus = ENV['VAGRANT_NODE_CPUS'] || '2'
if ENV['GITLAB_CI'] != "true"
libvirt.memory = ENV['VAGRANT_NODE_MEMORY'] || '1024'
end
if (ENV['VAGRANT_BOX'] || 'debian/stretch64') == 'debian/stretch64'
override.ssh.insert_key = false
end
end
end
end
end
config.vm.define "master", primary: true do |subconfig|
subconfig.vm.box = ENV['VAGRANT_BOX'] || 'debian/stretch64'
subconfig.vm.hostname = master_fqdn
subconfig.vm.provision "shell", inline: $fix_hostname_dns, keep_color: true
subconfig.vm.provision "shell", inline: $provision_box, keep_color: true, run: "always"
# Inject the insecure Vagrant SSH key into the master node so it can be
# used by Ansible and cluster detection to connect to the other nodes.
subconfig.vm.provision "file", source: "#{Dir.home}/.vagrant.d/insecure_private_key", \
destination: "/home/vagrant/.ssh/id_rsa"
subconfig.vm.provision "shell" do |s|
s.inline = <<-SHELL
chown vagrant:vagrant /home/vagrant/.ssh/id_rsa
chmod 600 /home/vagrant/.ssh/id_rsa
SHELL
end
subconfig.vm.provider "libvirt" do |libvirt, override|
# On the libvirt provider, the default sync method is NFS. Switching it
# to 'rsync' drops the dependency on NFS on the host.
override.vm.synced_folder ENV['CI_PROJECT_DIR'] || ".", "/vagrant", type: "rsync"
libvirt.random_hostname = true
libvirt.memory = ENV['VAGRANT_MASTER_MEMORY'] || '1024'
libvirt.cpus = ENV['VAGRANT_MASTER_CPUS'] || '2'
if ENV['GITLAB_CI'] != "true"
libvirt.memory = ENV['VAGRANT_MASTER_MEMORY'] || '2048'
libvirt.cpus = ENV['VAGRANT_MASTER_CPUS'] || '4'
end
if (ENV['VAGRANT_BOX'] || 'debian/stretch64') == 'debian/stretch64'
override.ssh.insert_key = false
end
end
if Vagrant::Util::Platform.windows? then
# MS Windows doesn't support symlinks, so disable directory sync under it.
# DebOps will be installed normally, via 'debops-update'
subconfig.vm.synced_folder ".", "/vagrant", disabled: true
elsif ENV['GITLAB_CI'] == "true"
# We are running in a GitLab CI environment
subconfig.vm.synced_folder ENV['CI_PROJECT_DIR'] || ".", "/vagrant"
end
if ENV['GITLAB_CI'] != "true"
subconfig.vm.provision "shell", inline: $provision_controller, keep_color: true, privileged: false
end
if ENV['CI'] != "true"
subconfig.vm.post_up_message = "Thanks for trying DebOps! After logging in, run:
cd src/controller ; debops common --diff"
end
end
end