diff --git a/README.md b/README.md index e9bfb75..f5a9ef5 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,8 @@ # ELK-Stack-with-Vagrant-and-Ansible Building an ELK stack with Vagrant and Ansible + +This is the source code to go along with the blog article + +[ELK-Stack-with-Vagrant-and-Ansible]() + + diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000..3a9ff99 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,42 @@ +# -*- mode: ruby -*- +# vi: ft=ruby : + +# +# Borrowed the idea from http://bertvv.github.io/notes-to-self/2015/10/05/one-vagrantfile-to-rule-them-all/ +# + +require 'rbconfig' +require 'yaml' + +DEFAULT_BASE_BOX = "bento/ubuntu-16.04" +cpuCap = 10 # Limit to 10% of the cpu +inventory = YAML.load_file("inventory.yml") # Get the names & ip addresses for the guest hosts +VAGRANTFILE_API_VERSION = '2' + +def provision_ansible(config) + config.vm.provision "ansible" do |ansible| + ansible.playbook = "elk.yml" + ansible.become = true + end +end + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vbguest.auto_update = false + inventory.each do |group, groupHosts| + next if (group == "justLocal") + groupHosts['hosts'].each do |hostName, hostInfo| + config.vm.define hostName do |node| + node.vm.box = hostInfo['box'] ||= DEFAULT_BASE_BOX + node.vm.hostname = hostName # Set the hostname + node.vm.network :private_network, ip: hostInfo['ansible_host'] # Set the IP address + ram = hostInfo['memory'] # Set the memory + node.vm.provider :virtualbox do |vb| + vb.name = hostName + vb.customize ["modifyvm", :id, "--cpuexecutioncap", cpuCap, "--memory", ram.to_s] + end + end + end + end + provision_ansible(config) +end + diff --git a/elk.yml b/elk.yml new file mode 100644 index 0000000..a0618cf --- /dev/null +++ b/elk.yml @@ -0,0 +1,26 @@ +- hosts: es-master-nodes + become: true + roles: + - { role: elastic.elasticsearch, cluster_http_port: 9201, cluster_transport_tcp_port: 9301} + +- hosts: es-data-nodes + become: true + roles: + - { role: 
elastic.elasticsearch, cluster_http_port: 9201, cluster_transport_tcp_port: 9301} + +- hosts: kibana-nodes + become: true + roles: + - { role: ashokc.kibana, kibana_server_port: 5601, cluster_http_port: 9201 } + +- hosts: logstash-nodes + become: true + roles: + - { role: ashokc.logstash, cluster_http_port: 9201, filebeat_2_logstash_port: 5044 } + +- hosts: filebeat-nodes + become: true + roles: + - {role: ashokc.filebeat, filebeat_2_logstash_port: 5044 } + + diff --git a/files/custom-filter.conf b/files/custom-filter.conf new file mode 100644 index 0000000..04eedfd --- /dev/null +++ b/files/custom-filter.conf @@ -0,0 +1,16 @@ +filter { + if [fields][log_type] == "custom" { + grok { + match => [ "message", "(?<timestamp>\w{3}\s+\w{3}\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+\d{4})\s+(?<nDays>\d{1,3}):(?<nHrs>\d{1,2}):(?<nMins>\d{1,2}):(?<nSecs>\d{1,2})\s+(?<nMulti>\d{1,2}):(?<logLevel>\w+) Type: (?<logType>\w+):[^#]+# (?<msgNumber>\d+)\s+%{GREEDYDATA}" ] + add_tag => ["grokked"] + add_field => { "foo_%{nDays}" => "Hello world, from %{nHrs}" } + } + mutate { + gsub => ["message", "ELK", "BULK"] + } + date { + match => [ "timestamp" , "EEE MMM d H:m:s Y", "EEE MMM d H:m:s Y" ] + add_tag => ["dated"] + } + } +} diff --git a/files/genLogs.pl b/files/genLogs.pl new file mode 100755 index 0000000..40520cc --- /dev/null +++ b/files/genLogs.pl @@ -0,0 +1,26 @@ +#!/usr/bin/perl -w +use strict ; +no warnings 'once'; +my @codes = qw (fatal error warning info debug trace) ; +open(my $fh, ">>", "/tmp/custom.log") ; +$fh->autoflush(1); +my $now = time(); +for my $i (1 .. 
100) { + my $message0 = "Type: CustomLog: This is a generic message # $i for testing ELK" ; + my $nDays = int(rand(5)) ; + my $nHrs = int(rand(24)) ; + my $nMins = int(rand(60)) ; + my $nSecs = int(rand(60)) ; + my $timeValue = $now - $nDays * 86400 - $nHrs * 3600 - $nMins * 60 - $nSecs ; + my $now1 = localtime($timeValue) ; + my $nMulti = int(rand(10)) ; + my $message = "$now1 $nDays:$nHrs:$nMins:$nSecs $nMulti:$codes[int(rand(scalar @codes))] $message0" ; + if ($nMulti > 0) { + for my $line (1 .. $nMulti) { + $message = $message . "\n ++ continuing the previous line for this log error..." + } + } + print $fh "$message\n" ; +} +close $fh ; + diff --git a/group_vars/all.yml b/group_vars/all.yml new file mode 100644 index 0000000..5fec690 --- /dev/null +++ b/group_vars/all.yml @@ -0,0 +1,7 @@ +private_iface: eth0 +public_iface: eth1 +elk_version: 5.6.1 +es_major_version: 5.x +es_apt_key: https://artifacts.elastic.co/GPG-KEY-elasticsearch +es_version: "{{ elk_version }}" +es_apt_url: deb https://artifacts.elastic.co/packages/{{ es_major_version }}/apt stable main diff --git a/group_vars/es-data-nodes.json b/group_vars/es-data-nodes.json new file mode 100644 index 0000000..8e4a708 --- /dev/null +++ b/group_vars/es-data-nodes.json @@ -0,0 +1,17 @@ +{ + "es_data_dirs" : "/opt/elasticsearch", + "es_java_install" : true, + "es_api_port": "{{cluster_http_port}}", + "es_instance_name" : "{{cluster_http_port}}_{{cluster_transport_tcp_port}}", + "masterHosts_transport" : "{% for host in groups['es-master-nodes'] %} {{hostvars[host]['ansible_'+public_iface]['ipv4']['address'] }}:{{cluster_transport_tcp_port}}{%endfor %}", + "es_config": { + "cluster.name": "{{es_instance_name}}", + "http.port": "{{cluster_http_port}}", + "transport.tcp.port": "{{cluster_transport_tcp_port}}", + "node.master": false, + "node.data": true, + "network.host": ["{{ hostvars[inventory_hostname]['ansible_' + public_iface]['ipv4']['address'] }}","_local_" ], + "discovery.zen.ping.unicast.hosts" : "{{ 
masterHosts_transport.split() }}" + } +} + diff --git a/group_vars/es-master-nodes.json b/group_vars/es-master-nodes.json new file mode 100644 index 0000000..eaa6f49 --- /dev/null +++ b/group_vars/es-master-nodes.json @@ -0,0 +1,17 @@ +{ + "es_heap_size" : "256m", + "es_java_install" : true, + "es_api_port": "{{cluster_http_port}}", + "es_instance_name" : "{{cluster_http_port}}_{{cluster_transport_tcp_port}}", + "masterHosts_transport" : "{% for host in groups['es-master-nodes'] %} {{hostvars[host]['ansible_'+public_iface]['ipv4']['address'] }}:{{cluster_transport_tcp_port}}{%endfor %}", + "es_config": { + "cluster.name": "{{es_instance_name}}", + "http.port": "{{cluster_http_port}}", + "transport.tcp.port": "{{cluster_transport_tcp_port}}", + "node.master": true, + "node.data": false, + "network.host": ["{{ hostvars[inventory_hostname]['ansible_' + public_iface]['ipv4']['address'] }}","_local_" ], + "discovery.zen.ping.unicast.hosts" : "{{ masterHosts_transport.split() }}" + } +} + diff --git a/group_vars/filebeat-nodes.yml b/group_vars/filebeat-nodes.yml new file mode 100644 index 0000000..5e8615a --- /dev/null +++ b/group_vars/filebeat-nodes.yml @@ -0,0 +1,6 @@ +filebeat_version: "{{ elk_version }}" +filebeat_user: filebeatUser +filebeat_group: filebeatGroup +filebeat_enabled_on_boot: yes +logstashHostsList: "{% for host in groups['logstash-nodes'] %} {{hostvars[host]['ansible_'+public_iface]['ipv4']['address'] }}:{{filebeat_2_logstash_port}}{% endfor %}" +filebeat_logstash_hosts: "{{ logstashHostsList.split() }}" diff --git a/group_vars/kibana-nodes.yml b/group_vars/kibana-nodes.yml new file mode 100644 index 0000000..42375f4 --- /dev/null +++ b/group_vars/kibana-nodes.yml @@ -0,0 +1,8 @@ +kibana_version: "{{ elk_version }}" +kibana_user: kibanaUser +kibana_group: kibanaGroup +kibana_enabled_on_boot: yes +kibana_server_host: 0.0.0.0 +kibana_elasticsearch_url : http://{{hostvars[groups['es-master-nodes'][0]]['ansible_'+public_iface]['ipv4']['address'] 
}}:{{cluster_http_port}} +kibana_instance: "{{kibana_server_port}}" + diff --git a/group_vars/logstash-nodes.yml b/group_vars/logstash-nodes.yml new file mode 100644 index 0000000..556b5eb --- /dev/null +++ b/group_vars/logstash-nodes.yml @@ -0,0 +1,12 @@ + +es_java_install: True +update_java: False +logstash_version: "{{ elk_version }}" +logstash_user: logstashUser +logstash_group: logstashGroup +logstash_enabled_on_boot: yes +logstash_install_plugins: + - logstash-input-beats +esMasterHosts: "{% for host in groups['es-master-nodes'] %} http://{{hostvars[host]['ansible_'+public_iface]['ipv4']['address'] }}:{{cluster_http_port}}{% endfor %}" +logstash_es_urls : "{{ esMasterHosts.split() }}" + diff --git a/inventory.yml b/inventory.yml new file mode 100644 index 0000000..e0f9c11 --- /dev/null +++ b/inventory.yml @@ -0,0 +1,53 @@ +es-master-nodes: + hosts: + es-master-1: # hostname + ansible_host: 192.168.33.25 # ip address + ansible_user: vagrant + memory: 2048 # ram to be assigned in MB + ansible_ssh_private_key_file: .vagrant/machines/es-master-1/virtualbox/private_key + +es-data-nodes: + hosts: + es-data-1: + ansible_host: 192.168.33.26 + ansible_user: vagrant + memory: 2048 + ansible_ssh_private_key_file: .vagrant/machines/es-data-1/virtualbox/private_key + + es-data-2: + ansible_host: 192.168.33.27 + ansible_user: vagrant + memory: 2048 + ansible_ssh_private_key_file: .vagrant/machines/es-data-2/virtualbox/private_key + +kibana-nodes: + hosts: + kibana-1: + ansible_host: 192.168.33.28 + ansible_user: vagrant + memory: 512 + ansible_ssh_private_key_file: .vagrant/machines/kibana-1/virtualbox/private_key + +logstash-nodes: + hosts: + logstash-1: + ansible_host: 192.168.33.29 + ansible_user: vagrant + memory: 1536 + ansible_ssh_private_key_file: .vagrant/machines/logstash-1/virtualbox/private_key + +filebeat-nodes: + hosts: + filebeat-1: + ansible_host: 192.168.33.30 + ansible_user: vagrant + memory: 512 + ansible_ssh_private_key_file: 
.vagrant/machines/filebeat-1/virtualbox/private_key + + filebeat-2: + ansible_host: 192.168.33.31 + ansible_user: vagrant + memory: 512 + ansible_ssh_private_key_file: .vagrant/machines/filebeat-2/virtualbox/private_key + +