# -*- mode: ruby -*-
# vi: set ft=ruby :
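#
# Typical usage (a sketch; this assumes the VirtualBox provider and the
# vagrant-hostmanager plugin are installed, vagrant-cachier is optional):
#
#   vagrant up                   # bring up and provision all masters and slaves
#   ANSIBLE_LOG=-vv vagrant up   # same, with verbose Ansible output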
require 'yaml'
base_dir = File.expand_path(File.dirname(__FILE__))
conf = YAML.load_file(File.join(base_dir, "vagrant.yml"))
groups = YAML.load_file(File.join(base_dir, "ansible-groups.yml"))
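
# The code below only relies on the following shape for the two YAML files
# (the keys are the ones read further down; the values here are illustrative,
# the real ones live in vagrant.yml / ansible-groups.yml):
#
#   # vagrant.yml
#   masters:
#     ips: ['172.31.1.11', '172.31.1.12', '172.31.1.13']
#     mem: 1024
#     cpus: 1
#   slaves:
#     ips: ['172.31.2.11', '172.31.2.12']
#     mem: 2048
#     cpus: 1
#   mesos_master_quorum: 2
#   consul_bootstrap_expect: 3
#
#   # ansible-groups.yml
#   ansible_groups:
#     ...   # any extra Ansible groups, passed straight to ansible.groups;
#           # "mesos_masters" and "mesos_slaves" are filled in below.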
require File.join(base_dir, "vagrant_helper")
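# vagrant_helper (loaded above) is expected to provide get_apollo_variables,
# which is used below to collect Apollo-related settings from the environment
# and merge them into the Ansible extra vars.
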
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.require_version ">= 1.7.0"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # if you want to use vagrant-cachier,
  # please install vagrant-cachier plugin.
  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.enable :apt
    config.cache.scope = :box
  end

  # throw error if vagrant-hostmanager not installed
  unless Vagrant.has_plugin?("vagrant-hostmanager")
    raise "vagrant-hostmanager plugin not installed"
  end

  config.vm.box = "capgemini/apollo"
  config.hostmanager.enabled = true
  config.hostmanager.manage_host = true
  config.hostmanager.include_offline = true
  config.ssh.insert_key = false

  # Common ansible groups.
  ansible_groups = groups['ansible_groups']
  ansible_groups["mesos_masters"] = []
  masters_conf = conf['masters']
  masters_n = masters_conf['ips'].count
  master_infos = []

  # Mesos master nodes
  (1..masters_n).each { |i|
    ip = masters_conf['ips'][i - 1]
    node = {
      :zookeeper_id => i,
      :hostname => "master#{i}",
      :ip => ip,
      :mem => masters_conf['mem'],
      :cpus => masters_conf['cpus'],
    }
    master_infos.push(node)
    # Add the node to the correct ansible group.
    ansible_groups["mesos_masters"].push(node[:hostname])

    config.vm.define node[:hostname] do |cfg|
      cfg.vm.provider :virtualbox do |vb, machine|
        machine.vm.hostname = node[:hostname]
        machine.vm.network :private_network, :ip => node[:ip]
        vb.name = 'vagrant-mesos-' + node[:hostname]
        vb.customize ["modifyvm", :id, "--memory", node[:mem], "--cpus", node[:cpus]]
      end
    end
  }

  # zookeeper_peers e.g. 172.31.1.11:2181,172.31.1.12:2181,172.31.1.13:2181
  zookeeper_peers = master_infos.map { |master| master[:ip] + ":2181" }.join(",")
  # zookeeper_conf e.g. server.1=172.31.1.11:2888:3888 server.2=172.31.1.12:2888:3888 server.3=172.31.1.13:2888:3888
  zookeeper_conf = master_infos.map { |master| "server.#{master[:zookeeper_id]}=" + master[:ip] + ":2888:3888" }.join(" ")
  # consul_join e.g. 172.31.1.11 172.31.1.12 172.31.1.13
  consul_join = master_infos.map { |master| master[:ip] }.join(" ")
  # consul_retry_join e.g. "172.31.1.11", "172.31.1.12", "172.31.1.13"
  consul_retry_join = master_infos.map { |master| "\"#{master[:ip]}\"" }.join(", ")

  # Ansible variables
  ansible_extra_vars = {
    zookeeper_peers: zookeeper_peers,
    zookeeper_conf: zookeeper_conf,
    consul_join: consul_join,
    consul_retry_join: consul_retry_join,
    mesos_master_quorum: conf['mesos_master_quorum'],
    consul_bootstrap_expect: conf['consul_bootstrap_expect']
  }

  # Apollo environment variables
  apollo_vars = get_apollo_variables(ENV)
  # Add apollo variables to ansible ones
  ansible_extra_vars.merge!(apollo_vars)

  # Mesos slave nodes
  slaves_conf = conf['slaves']
  ansible_groups["mesos_slaves"] = []
  slave_n = slaves_conf['ips'].count

  (1..slave_n).each { |i|
    ip = slaves_conf['ips'][i - 1]
    node = {
      :hostname => "slave#{i}",
      :ip => ip,
      :mem => slaves_conf['mem'],
      :cpus => slaves_conf['cpus'],
    }
    # Add the node to the correct ansible group.
    ansible_groups["mesos_slaves"].push(node[:hostname])

    config.vm.define node[:hostname] do |cfg|
      cfg.vm.provider :virtualbox do |vb, machine|
        machine.vm.hostname = node[:hostname]
        machine.vm.network :private_network, :ip => node[:ip]
        vb.name = 'vagrant-mesos-' + node[:hostname]
        vb.customize ["modifyvm", :id, "--memory", node[:mem], "--cpus", node[:cpus]]

        # We invoke ansible on the last slave with ansible.limit = 'all';
        # this runs the provisioning across all masters and slaves in parallel.
        if node[:hostname] == "slave#{slave_n}"
          machine.vm.provision :ansible do |ansible|
            ansible.playbook = "site.yml"
            ansible.sudo = true
            unless ENV['ANSIBLE_LOG'].nil? || ENV['ANSIBLE_LOG'].empty?
              ansible.verbose = ENV['ANSIBLE_LOG'].delete('-')
            end
            ansible.groups = ansible_groups
            ansible.limit = 'all'
            ansible.extra_vars = ansible_extra_vars
          end
        end
      end
    end
  }

  # If you want to use a custom `.dockercfg` file simply place it
  # in this directory.
  if File.exist?(".dockercfg")
    config.vm.provision :shell, :privileged => true, :inline => <<-SCRIPT
      cp /vagrant/.dockercfg /root/.dockercfg
      chmod 600 /root/.dockercfg
      chown root /root/.dockercfg
    SCRIPT
  end
end