Updated Ansible scripts
larsgeorge committed Jun 11, 2016
1 parent 4aedfe2 commit bcbd833
Showing 32 changed files with 1,016 additions and 318 deletions.
6 changes: 4 additions & 2 deletions cluster/ansible/group_vars/all
@@ -1,11 +1,13 @@
---

SECURITY_ENABLED: false

RESTART_ON_CHANGE: true

CLUSTER_DOMAIN: internal.larsgeorge.com
OS_USERS: [ 'hdfs', 'yarn', 'mapred', 'hbase', 'zookeeper', 'hue' ]

-SSL_ENABLED: true
+SSL_ENABLED: false
SSL_SERVICES:
- { owner: 'hdfs', path: 'hadoop' }
# - { owner: 'yarn', path: 'hadoop' } # YARN shares the "hadoop" configuration with HDFS
@@ -14,7 +16,7 @@ SSL_SERVICES:
SSL_STORE_PASSWORD: sslsekret
SSL_KEY_PASSWORD: sslsekret

-KERBEROS_ENABLED: true
+KERBEROS_ENABLED: false
KRB_REALM: INTERNAL.LARSGEORGE.COM
KRB_SERVER: master-2.internal.larsgeorge.com
KRB_PRINCIPALS: [ 'hdfs', 'HTTP', 'yarn', 'hbase', 'zookeeper' ]
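How the SSL_SERVICES list is consumed is not visible in this diff; each entry pairs a keystore owner with a configuration path. A minimal hypothetical task (not part of this commit; the keystore location and keytool options are assumptions) that loops over the list inside one of the ssl-phase roles could look like this:

- name: Create one keystore per SSL-enabled service
  command: >
    keytool -genkeypair -alias {{ item.owner }} -keyalg RSA -keysize 2048
    -dname "CN={{ inventory_hostname }}"
    -keystore {{ CONFIG_BASE }}/{{ item.path }}/conf/keystore.jks
    -storepass {{ SSL_STORE_PASSWORD }} -keypass {{ SSL_KEY_PASSWORD }}
  with_items: "{{ SSL_SERVICES }}"
  when: SSL_ENABLED == true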
11 changes: 11 additions & 0 deletions cluster/ansible/install-common.yml
@@ -0,0 +1,11 @@
---
# Sub playbook to install all required common components.

- name: Apply common configuration to all nodes
hosts: all
remote_user: larsgeorge
become: yes

roles:
- common
- { role: krb-client, when: KERBEROS_ENABLED == true }
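Each of these sub playbooks can also be run on its own against the inventory shipped with the repository, for example (the -K prompt for the privilege-escalation password is only needed if the larsgeorge account lacks passwordless sudo):

ansible-playbook -i inventories/cluster.inv install-common.yml -K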
26 changes: 26 additions & 0 deletions cluster/ansible/install-monitoring.yml
@@ -0,0 +1,26 @@
---
# Sub playbook to install all required monitoring components.

- name: Install monitoring collection daemons
hosts: monitoring_collection
remote_user: larsgeorge
become: yes

roles:
- monitoring-collection

- name: Install monitoring aggregation daemons
hosts: monitoring_aggregation
remote_user: larsgeorge
become: yes

roles:
- monitoring-aggregation

- name: Install monitoring frontend
hosts: monitoring_frontend
remote_user: larsgeorge
become: yes

roles:
- monitoring-frontend
35 changes: 35 additions & 0 deletions cluster/ansible/install-security.yml
@@ -0,0 +1,35 @@
---
# Sub playbook to install all required security components.

- name: Set up Kerberos KDC
hosts: kdc_server
remote_user: larsgeorge
become: yes

roles:
- { role: kdc-server, when: KERBEROS_ENABLED == true }

- name: SSL Phase 1 (Prepare Servers)
hosts: all
remote_user: larsgeorge
become: yes

roles:
- { role: ssl-phase-1, when: SSL_ENABLED == true }

- name: SSL Phase 2 (Sign CSRs)
hosts: kdc_server
remote_user: larsgeorge
become: yes

roles:
- { role: ssl-phase-2, when: SSL_ENABLED == true }

- name: SSL Phase 3 (Install Signed Certificates and Truststores)
hosts: all
remote_user: larsgeorge
become: yes

roles:
- { role: ssl-phase-3, when: SSL_ENABLED == true }
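Nothing in this diff shows how the sub playbooks are chained together; presumably a top-level playbook includes them. A sketch under that assumption (the file name site.yml and the ordering are guesses), using the playbook-level include syntax current at the time:

---
# Hypothetical site.yml that runs the sub playbooks in sequence.
- include: install-common.yml
- include: install-security.yml
- include: install-monitoring.yml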

20 changes: 15 additions & 5 deletions cluster/ansible/inventories/cluster.inv
@@ -4,24 +4,34 @@ master-[1:3].internal.larsgeorge.com
[slaves]
slave-[1:3].internal.larsgeorge.com

-[hbase-master]
+[hbase_master]
master-1.internal.larsgeorge.com

-[hbase-backup-masters]
+[hbase_backup_masters]
master-[2:3].internal.larsgeorge.com

-[hbase-slaves:children]
+[hbase_slaves:children]
slaves

[historyserver]
master-1.internal.larsgeorge.com

-[kdc-server]
+[kdc_server]
master-2.internal.larsgeorge.com

[cluster:children]
masters
slaves

[zookeepers:children]
masters

[monitoring_collection:children]
masters
slaves

[monitoring_aggregation]
master-3.internal.larsgeorge.com

[monitoring_frontend]
master-3.internal.larsgeorge.com
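The group names lose their dashes (hbase_master, kdc_server, and so on), presumably because Ansible treats dashes in group names as problematic once the groups are referenced in variable contexts. A quick sanity check that a pattern still resolves against the renamed groups, run from cluster/ansible:

ansible -i inventories/cluster.inv hbase_slaves --list-hosts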
74 changes: 74 additions & 0 deletions cluster/ansible/roles/common/tasks/install-common.yml
@@ -0,0 +1,74 @@
---
# Purpose: Common node-related settings
#
# Notes:
# - Executed on all servers in the cluster

# Set OS prerequisites

- name: Disable SELinux
selinux: state=disabled

- name: Stop IP Tables
service: name={{ item }} state=stopped enabled=no
with_items:
- iptables
- ip6tables

- name: Set file limits
lineinfile: dest=/etc/security/limits.conf line="{{ item }}" state=present
with_items:
- '* soft nofile 65535'
- '* hard nofile 65535'
- '* soft memlock unlimited'
- '* hard memlock unlimited'

- name: Adjust values in sysctl.conf
sysctl: name={{ item.name }} value={{ item.value }} state={{ item.state }}
with_items:
- { name: 'vm.swappiness', value: '1', state: 'present' }
- { name: 'net.ipv6.conf.all.disable_ipv6', value: '1', state: 'present' }
- { name: 'net.ipv6.conf.default.disable_ipv6', value: '1', state: 'present' }

- name: Disable transparent huge page defragmentation
shell: echo never > /sys/kernel/mm/transparent_hugepage/defrag  # shell, not command, so the redirect is honored

# Add shared environment details and install packages

- name: Add variables to /etc/environment
lineinfile: dest=/etc/environment line="{{ item }}" state=present
with_items:
- 'JAVA_HOME={{ JAVA_HOME }}'
- 'JAVA_LIBRARY_PATH=/usr/local/lib'
- 'ZOOKEEPER_HOME={{ PACKAGE_BASE }}/zookeeper'
- 'ZOOCFGDIR={{ CONFIG_BASE }}/zookeeper/conf'
- 'HADOOP_HOME={{ PACKAGE_BASE }}/hadoop'
- 'HADOOP_CONF_DIR={{ CONFIG_BASE }}/hadoop/conf'
- 'HBASE_HOME={{ PACKAGE_BASE }}/hbase'
- 'HBASE_CONF_DIR={{ CONFIG_BASE }}/hbase/conf'

- name: Set PATH in bash profile
lineinfile: dest=.bash_profile line="{{ item }}" state=present
with_items:
- 'export PATH=$PATH:$HOME/bin:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin'

- name: Install basic OS packages
yum: name={{ item }} state=present # update_cache=yes
with_items:
- epel-release
- snappy
- snappy-devel
- ntp
- ntpdate
- java-1.7.0-openjdk
- java-1.7.0-openjdk-devel
- libselinux-python
- unzip
- tar

- name: Start services
service: name={{ item }} state=started enabled=yes
with_items:
- ntpd
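The common role splits its tasks into per-concern files (install-common.yml here, install-hadoop.yml below); a tasks/main.yml that pulls them in is presumably part of the role but is not shown in this diff. A sketch under that assumption:

---
# Hypothetical roles/common/tasks/main.yml
- include: install-common.yml
- include: install-hadoop.yml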


90 changes: 90 additions & 0 deletions cluster/ansible/roles/common/tasks/install-hadoop.yml
@@ -0,0 +1,90 @@
---
# Purpose: Common Hadoop node-related settings
#
# Notes:
# - Executed on all servers in the cluster
# - Also installs the Hadoop-related tarballs, etc.

# TODO: Install Hadoop and other packages from tarballs

- name: Create Hadoop system user accounts
group: name=hadoop state=present
- user: name={{ item }} group=hadoop createhome=no shell=/bin/false state=present
with_items:
- "{{ OS_USERS }}"

- name: Create Hadoop related directories (secure mode)
file: path={{ item.path }} owner={{ item.owner }} group=hadoop mode={{ item.mode }} recurse=yes state=directory
with_items:
- { path: '/data/hadoop/hdfs', owner: 'hdfs', mode: 700 }
- { path: '{{ VAR_RUN_BASE }}/hadoop', owner: 'hdfs', mode: 755 }
- { path: '{{ VAR_RUN_BASE }}/hbase', owner: 'hbase', mode: 755 }
- { path: '{{ VAR_RUN_BASE }}/zookeeper', owner: 'zookeeper', mode: 755 }
- { path: '{{ CONFIG_BASE }}/hadoop', owner: 'hdfs', mode: 775 }
- { path: '{{ CONFIG_BASE }}/hbase', owner: 'hbase', mode: 775 }
- { path: '{{ CONFIG_BASE }}/zookeeper', owner: 'zookeeper', mode: 775 }
when: SECURITY_ENABLED == true

- name: Create Hadoop related directories (non-secure mode)
file: path={{ item.path }} owner={{ item.owner }} group=hadoop mode={{ item.mode }} recurse=yes state=directory
with_items:
- { path: '/data/hadoop/hdfs', owner: 'hadoop', mode: 775 }
- { path: '{{ VAR_RUN_BASE }}/hadoop', owner: 'hadoop', mode: 775 }
- { path: '{{ VAR_RUN_BASE }}/hbase', owner: 'hadoop', mode: 775 }
- { path: '{{ VAR_RUN_BASE }}/zookeeper', owner: 'zookeeper', mode: 775 }
- { path: '{{ CONFIG_BASE }}/hadoop', owner: 'hadoop', mode: 775 }
- { path: '{{ CONFIG_BASE }}/hbase', owner: 'hadoop', mode: 775 }
- { path: '{{ CONFIG_BASE }}/zookeeper', owner: 'zookeeper', mode: 775 }
when: SECURITY_ENABLED == false

# TODO:
# - name: Copy and untar Hadoop
# - name: Copy and untar HBase
# - name: Copy and untar ZooKeeper
# - name: Copy and untar Hue

- name: Put Hadoop configuration in place
template: src={{ item.src }} dest={{ item.dest }} owner=hdfs group=hadoop mode=0644
with_items:
- { src: "etc/opt/hadoop/conf/slaves.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/slaves" }
- { src: "etc/opt/hadoop/conf/hadoop-env.sh.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/hadoop-env.sh" }
- { src: "etc/opt/hadoop/conf/yarn-env.sh.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/yarn-env.sh" }
- { src: "etc/opt/hadoop/conf/mapred-env.sh.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/mapred-env.sh" }
- { src: "etc/opt/hadoop/conf/core-site.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/core-site.xml" }
- { src: "etc/opt/hadoop/conf/hdfs-site.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/hdfs-site.xml" }
- { src: "etc/opt/hadoop/conf/yarn-site.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/yarn-site.xml" }
- { src: "etc/opt/hadoop/conf/mapred-site.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/mapred-site.xml" }
- { src: "etc/opt/hadoop/conf/ssl-client.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/ssl-client.xml" }
- { src: "etc/opt/hadoop/conf/ssl-server.xml.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/ssl-server.xml" }
- { src: "etc/opt/hadoop/conf/container-executor.cfg.j2", dest: "{{ CONFIG_BASE }}/hadoop/conf/container-executor.cfg" }
# notify:
# - restart hadoop

# Also: hadoop-policy.xml, httpfs-env.sh, httpfs-site.xml, log4j.properties

- name: Put HBase configuration in place
template: src={{ item.src }} dest={{ item.dest }} owner=hbase group=hadoop mode=0644
with_items:
- { src: "etc/opt/hbase/conf/regionservers.j2", dest: "{{ CONFIG_BASE }}/hbase/conf/regionservers" }
- { src: "etc/opt/hbase/conf/backup-masters.j2", dest: "{{ CONFIG_BASE }}/hbase/conf/backup-masters" }
- { src: "etc/opt/hbase/conf/hbase-env.sh.j2", dest: "{{ CONFIG_BASE }}/hbase/conf/hbase-env.sh" }
- { src: "etc/opt/hbase/conf/hbase-site.xml.j2", dest: "{{ CONFIG_BASE }}/hbase/conf/hbase-site.xml" }
# notify:
# - restart hbase

# Also: hbase-policy.xml, log4j.properties, hadoop-metrics2-hbase.properties

- name: Put Zookeeper configuration in place
template: src={{ item.src }} dest={{ item.dest }} owner=zookeeper group=hadoop mode=0644
with_items:
- { src: "etc/opt/zookeeper/conf/zoo.cfg.j2", dest: "{{ CONFIG_BASE }}/zookeeper/conf/zoo.cfg" }
# notify:
# - restart zookeeper

# Also: log4j.properties
# Zookeeper: http://www.cloudera.com/documentation/enterprise/latest/topics/cdh_sg_zookeeper_security.html

- name: Copy init.d scripts
copy: src=etc/init.d/ dest=/etc/init.d/ owner=root group=root mode=755 backup=yes
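The notify hooks above are still commented out; enabling them requires handlers with matching names in the role. A sketch of what roles/common/handlers/main.yml might contain (the handler names come from the commented notify entries, while the service names and the RESTART_ON_CHANGE guard are assumptions, not part of this commit):

---
# Hypothetical roles/common/handlers/main.yml
- name: restart hadoop
  service: name=hadoop state=restarted
  when: RESTART_ON_CHANGE == true

- name: restart hbase
  service: name=hbase state=restarted
  when: RESTART_ON_CHANGE == true

- name: restart zookeeper
  service: name=zookeeper state=restarted
  when: RESTART_ON_CHANGE == true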

