Commit
Merge pull request kubernetes#867 from derekwaynecarr/apiserver_loopback
Vagrant: apiserver doesn't bind to 127.0.0.1, inaccessible through master nginx
brendandburns committed Aug 14, 2014
2 parents 25352c5 + 10be802 commit bf7f8a2
Showing 8 changed files with 76 additions and 39 deletions.
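
Taken together, the diff binds the apiserver to the loopback address and routes external clients through nginx on the master, with basic auth and https. As a rough post-provision sanity check (a sketch, not part of the commit: the v1beta1 path is assumed from this era of the API, and -k assumes the nginx vhost terminates TLS with a self-signed certificate):

```
curl -k -u vagrant:vagrant https://10.245.1.2/api/v1beta1/minions
```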
cluster/saltbase/salt/apiserver/default (1 change: 0 additions & 1 deletion)
@@ -9,7 +9,6 @@
 
 {% if grains.etcd_servers is defined %}
 {% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %}
-{% set address = "-address=" + grains.etcd_servers %}
 {% else %}
 {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
 {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %}
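
Per the commit message, removing the -address override lets the apiserver fall back to its default loopback bind address, which is what the nginx proxy below expects. One way to confirm the listener after provisioning (assuming the Vagrant master VM is named master and ss is available on the guest):

```
vagrant ssh master -c "sudo ss -ltnp | grep 8080"
```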
cluster/saltbase/salt/controller-manager/default (3 changes: 0 additions & 3 deletions)
@@ -3,7 +3,4 @@
 {% set daemon_args = "" %}
 {% endif %}
 {% set master="-master=127.0.0.1:8080" %}
-{% if grains.master_ip is defined %}
-{% set master="-master=" + grains.master_ip + ":8080" %}
-{% endif %}
 DAEMON_ARGS="{{daemon_args}} {{master}}"
cluster/saltbase/salt/nginx/kubernetes-site (2 changes: 1 addition & 1 deletion)
@@ -47,7 +47,7 @@ server {
 auth_basic_user_file /usr/share/nginx/htpasswd;
 
 # Proxy settings
-proxy_pass http://localhost:8080/;
+proxy_pass http://127.0.0.1:8080/;
 proxy_connect_timeout 159s;
 proxy_send_timeout 600s;
 proxy_read_timeout 600s;
cluster/vagrant/config-default.sh (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ NUM_MINIONS=${KUBERNETES_NUM_MINIONS-"3"}
 
 # IP LOCATIONS FOR INTERACTING WITH THE MASTER
 export KUBE_MASTER_IP="10.245.1.2"
-export KUBERNETES_MASTER="http://10.245.1.2:8080"
+export KUBERNETES_MASTER="https://10.245.1.2"
 
 # IP LOCATIONS FOR INTERACTING WITH THE MINIONS
 MINION_IP_BASE="10.245.2."
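
With KUBERNETES_MASTER now pointing at the nginx https endpoint instead of the raw apiserver port, client scripts reach the cluster through the authenticated proxy; for example (mirroring the usage documented later in this commit):

```
export KUBERNETES_MASTER="https://10.245.1.2"
cluster/kubecfg.sh list minions
```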
cluster/vagrant/provision-config.sh (6 changes: 4 additions & 2 deletions)
@@ -28,6 +28,8 @@ MINION_IP_RANGES=($(eval echo "10.245.{2..${NUM_MINIONS}}.2/24"))
 MINION_SCOPES=""
 
-# simplified setup for local vagrant 2 node cluster
-MASTER_HTPASSWD=passw0rd
 
+MASTER_USER=vagrant
+MASTER_PASSWD=vagrant
 
+# Location to hold temp files for provision process
+KUBE_TEMP=/var/kube-temp
cluster/vagrant/provision-master.sh (37 changes: 21 additions & 16 deletions)
@@ -18,12 +18,11 @@
 set -e
 source $(dirname $0)/provision-config.sh
 
-# we will run provision to update code each time we test, so we do not want to do salt install each time
-if [ ! -f "/var/kube-vagrant-setup" ]; then
-  mkdir -p /etc/salt/minion.d
-  echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+# Update salt configuration
+mkdir -p /etc/salt/minion.d
+echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
 
-  cat <<EOF >/etc/salt/minion.d/grains.conf
+cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
   master_ip: $MASTER_IP
   etcd_servers: $MASTER_IP
@@ -32,27 +31,38 @@
   - kubernetes-master
 EOF
 
-  # Configure the salt-master
-  # Auto accept all keys from minions that try to join
-  mkdir -p /etc/salt/master.d
-  cat <<EOF >/etc/salt/master.d/auto-accept.conf
+# Configure the salt-master
+# Auto accept all keys from minions that try to join
+mkdir -p /etc/salt/master.d
+cat <<EOF >/etc/salt/master.d/auto-accept.conf
 open_mode: True
 auto_accept: True
 EOF
 
-  cat <<EOF >/etc/salt/master.d/reactor.conf
+cat <<EOF >/etc/salt/master.d/reactor.conf
 # React to new minions starting by running highstate on them.
 reactor:
   - 'salt/minion/*/start':
     - /srv/reactor/start.sls
 EOF
 
-  cat <<EOF >/etc/salt/master.d/salt-output.conf
+cat <<EOF >/etc/salt/master.d/salt-output.conf
 # Minimize the amount of output to terminal
 state_verbose: False
 state_output: mixed
 EOF
 
+# Configure nginx authorization
+mkdir -p $KUBE_TEMP
+mkdir -p /srv/salt/nginx
+echo "Using password: $MASTER_USER:$MASTER_PASSWD"
+python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $MASTER_USER $MASTER_PASSWD
+MASTER_HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
+echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
+
+# we will run provision to update code each time we test, so we do not want to do salt install each time
+if [ ! $(which salt-master) ]; then
+
   # Install Salt
   #
   # We specify -X to avoid a race condition that can cause minion failure to
@@ -67,11 +77,6 @@
   # (a new service file needs to be added for salt-api)
   curl -sS -L https://raw.githubusercontent.com/saltstack/salt-bootstrap/v2014.06.30/bootstrap-salt.sh | sh -s -- -M
 
-  mkdir -p /srv/salt/nginx
-  echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
-
-  # a file we touch to state that base-setup is done
-  echo "Salt configured" > /var/kube-vagrant-setup
 fi
 
 # Build release
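
For reference, the bundled htpasswd.py invoked above writes a single user:hash line, which the script copies into /srv/salt/nginx/htpasswd for nginx's auth_basic_user_file. Run by hand from the repo root it would look roughly like this (hash elided; the exact hash format depends on the helper's defaults):

```
python third_party/htpasswd/htpasswd.py -b -c /tmp/htpasswd vagrant vagrant
cat /tmp/htpasswd
vagrant:<hash>
```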
cluster/vagrant/provision-minion.sh (27 changes: 12 additions & 15 deletions)
@@ -19,20 +19,19 @@ set -e
 source $(dirname $0)/provision-config.sh
 
 MINION_IP=$4
-# we will run provision to update code each time we test, so we do not want to do salt install each time
-if [ ! -f "/var/kube-vagrant-setup" ]; then
 
-  if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
-    echo "Adding host entry for $MASTER_NAME"
-    echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
-  fi
+# make sure each minion has an entry in hosts file for master
+if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
+  echo "Adding host entry for $MASTER_NAME"
+  echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
+fi
 
-  # Prepopulate the name of the Master
-  mkdir -p /etc/salt/minion.d
-  echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+# Let the minion know who its master is
+mkdir -p /etc/salt/minion.d
+echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
 
-  # Our minions will have a pool role to distinguish them from the master.
-  cat <<EOF >/etc/salt/minion.d/grains.conf
+# Our minions will have a pool role to distinguish them from the master.
+cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
   minion_ip: $MINION_IP
   etcd_servers: $MASTER_IP
@@ -41,6 +40,8 @@
   cbr-cidr: $MINION_IP_RANGE
 EOF
 
+# we will run provision to update code each time we test, so we do not want to do salt install each time
+if [ ! $(which salt-minion) ]; then
   # Install Salt
   #
   # We specify -X to avoid a race condition that can cause minion failure to
@@ -50,8 +51,4 @@
   ## TODO this only works on systemd distros, need to find a work-around as removing -X above fails to start the services installed
   systemctl enable salt-minion
   systemctl start salt-minion
-
-  # a file we touch to state that base-setup is done
-  echo "Salt configured" > /var/kube-vagrant-setup
-
 fi
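
Because the install step is now skipped whenever salt-minion is already on the PATH, a re-provision relies on the service having been enabled during the first run. One way to verify on a guest (the machine name minion-1 is an assumption about the Vagrantfile's naming):

```
vagrant ssh minion-1 -c "sudo systemctl status salt-minion"
```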
docs/getting-started-guides/vagrant.md (37 changes: 37 additions & 0 deletions)
@@ -91,6 +91,28 @@ cluster/kube-push.sh => updates a vagrant cluster
 cluster/kubecfg.sh => interact with the cluster
 ```
 
+### Authenticating with your master
+
+To interact with the cluster, you must authenticate with the master when running cluster/kubecfg.sh commands.
+
+If it's your first time using the cluster, your first invocation of cluster/kubecfg.sh will prompt you for credentials:
+
+```
+cd kubernetes
+cluster/kubecfg.sh list minions
+Please enter Username: vagrant
+Please enter Password: vagrant
+Minion identifier
+----------
+```
+
+The kubecfg.sh command will cache your credentials in a .kubernetes_auth file so you will not be prompted in the future.
+```
+cat ~/.kubernetes_auth
+{"User":"vagrant","Password":"vagrant"}
+```
+
+If you try Kubernetes against multiple cloud providers, make sure this file is correct for your target environment.
+
 ### Running a container
 
@@ -131,6 +153,21 @@ hack/e2e-test.sh
 
 ### Troubleshooting
 
+#### I just created the cluster, but I am getting authorization errors!
+
+You probably have an incorrect ~/.kubernetes_auth file for the cluster you are attempting to contact.
+
+```
+rm ~/.kubernetes_auth
+```
+
+And when using kubecfg.sh, provide the correct credentials:
+
+```
+Please enter Username: vagrant
+Please enter Password: vagrant
+```
+
 #### I just created the cluster, but I do not see my container running!
 
 If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned.
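
To watch those prerequisite pulls complete, you can inspect docker directly on a minion; a quick check (again assuming a machine name like minion-1):

```
vagrant ssh minion-1 -c "sudo docker images"
```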
