wifi fun
rmoff committed Mar 4, 2020
1 parent 89047ca commit c88f2fe
Showing 5 changed files with 482 additions and 0 deletions.
2 changes: 2 additions & 0 deletions wifi-fun/.gitignore
@@ -0,0 +1,2 @@
mqtt.credentials
container_data
20 changes: 20 additions & 0 deletions wifi-fun/copy_from_ccloud.sh
@@ -0,0 +1,20 @@
#!/bin/bash

source .env

# Consume the pcap topic from Confluent Cloud and pipe it into the local brokers
while :
do
    kafkacat -b "$CCLOUD_BROKER_HOST" \
             -X security.protocol=SASL_SSL -X sasl.mechanisms=PLAIN \
             -X sasl.username="$CCLOUD_API_KEY" -X sasl.password="$CCLOUD_API_SECRET" \
             -X ssl.ca.location=/usr/local/etc/openssl/cert.pem -X api.version.request=true \
             -X auto.offset.reset=earliest \
             -K: \
             -G asgard03_copy_to_local_05 pcap | \
    kafkacat -b localhost:9092,localhost:19092,localhost:29092 \
             -t pcap \
             -K: -P -T

    sleep 30

done
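
The script sources a .env file that isn't included in the commit. A minimal sketch of what it needs to hold, assuming only the three variables referenced above; every value here is a placeholder for your own Confluent Cloud details:

# .env (placeholder values, not real credentials)
CCLOUD_BROKER_HOST=pkc-xxxxx.europe-west1.gcp.confluent.cloud
CCLOUD_API_KEY=XXXXXXXXXXXXXXXX
CCLOUD_API_SECRET=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX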
38 changes: 38 additions & 0 deletions wifi-fun/create_replicator_source.sh
@@ -0,0 +1,38 @@
#!/bin/bash

source .env

echo "Waiting for Kafka Connect to start listening on localhost:58083 ⏳"
while : ; do
  curl_status=$(curl -s -o /dev/null -w %{http_code} http://localhost:58083/connectors)
  echo -e $(date) " Kafka Connect listener HTTP state: " $curl_status " (waiting for 200)"
  if [ $curl_status -eq 200 ] ; then
    break
  fi
  sleep 5
done
#
epoch=$(date +%s)
curl -s -X PUT -H "Accept:application/json" \
     -H "Content-Type:application/json" "http://localhost:58083/connectors/replicator-source"$epoch"/config" \
     -d '
{
    "connector.class": "io.confluent.connect.replicator.ReplicatorSourceConnector",
    "key.converter": "io.confluent.connect.replicator.util.ByteArrayConverter",
    "value.converter": "io.confluent.connect.replicator.util.ByteArrayConverter",
    "header.converter": "io.confluent.connect.replicator.util.ByteArrayConverter",
    "src.kafka.bootstrap.servers": "'$CCLOUD_BROKER_HOST':9092",
    "src.kafka.security.protocol": "SASL_SSL",
    "src.kafka.sasl.mechanism": "PLAIN",
    "src.kafka.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"'$CCLOUD_API_KEY'\" password=\"'$CCLOUD_API_SECRET'\";",
    "src.consumer.group.id": "replicator-'$epoch'",
    "dest.kafka.bootstrap.servers": "kafka-1:39092,kafka-2:49092,kafka-3:59092",
    "topic.whitelist": "pcap,pcap.wlan.fc.type_subtype",
    "topic.rename.format": "${topic}-'$epoch'",
    "confluent.license": "",
    "confluent.topic.bootstrap.servers": "kafka-1:39092,kafka-2:49092,kafka-3:59092",
    "confluent.topic.replication.factor": 1,
    "offset.start": "consumer"
}' | jq '.'

# "topic.rename.format":"${topic}-ccloud-'$epoch'",
330 changes: 330 additions & 0 deletions wifi-fun/docker-compose.yml
@@ -0,0 +1,330 @@
---
version: '3'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:5.4.0
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    # volumes:
    #   - ./data/container_data/zk-data:/var/lib/zookeeper/data
    #   - ./data/container_data/zk-txn-logs:/var/lib/zookeeper/log

  kafka-1:
    image: confluentinc/cp-enterprise-kafka:5.4.0
    container_name: kafka-1
    depends_on:
      - zookeeper
    ports:
      - 9092:9092
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:39092,HOST://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:39092,HOST://localhost:9092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
    # volumes:
    #   - ./data/container_data/kafka-1-data:/var/lib/kafka/data

  kafka-2:
    image: confluentinc/cp-enterprise-kafka:5.4.0
    container_name: kafka-2
    depends_on:
      - zookeeper
    ports:
      - 19092:19092
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:49092,HOST://0.0.0.0:19092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:49092,HOST://localhost:19092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
    # volumes:
    #   - ./data/container_data/kafka-2-data:/var/lib/kafka/data

  kafka-3:
    image: confluentinc/cp-enterprise-kafka:5.4.0
    container_name: kafka-3
    depends_on:
      - zookeeper
    ports:
      - 29092:29092
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:59092,HOST://0.0.0.0:29092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:59092,HOST://localhost:29092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
    # volumes:
    #   - ./data/container_data/kafka-3-data:/var/lib/kafka/data

  schema-registry:
    image: confluentinc/cp-schema-registry:5.4.0
    ports:
      - 8081:8081
    container_name: schema-registry
    depends_on:
      - zookeeper
      - kafka-1
      - kafka-2
      - kafka-3
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:39092,PLAINTEXT://kafka-2:49092,PLAINTEXT://kafka-3:59092
      SCHEMA_REGISTRY_CUB_KAFKA_TIMEOUT: 300

  ksqldb:
    image: confluentinc/ksqldb-server:0.7.1
    hostname: ksqldb
    container_name: ksqldb
    depends_on:
      - kafka-1
      # - kafka-connect-01
    ports:
      - "8088:8088"
    environment:
      KSQL_LISTENERS: http://0.0.0.0:8088
      KSQL_BOOTSTRAP_SERVERS: kafka-1:39092,kafka-2:49092,kafka-3:59092
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
      KSQL_KSQL_CONNECT_URL: http://kafka-connect-01:8083
      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081
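
  # Note on the listener setup above: each broker advertises two listeners, one
  # for clients inside the Docker network (e.g. kafka-1:39092, used by Schema
  # Registry, ksqlDB, and Replicator) and one for clients on the Docker host
  # (localhost:9092/19092/29092), which is what lets the kafkacat pipeline in
  # copy_from_ccloud.sh write to this cluster from the host.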

# kafka-connect-01:
# image: confluentinc/cp-kafka-connect:5.4.0
# container_name: kafka-connect-01
# depends_on:
# - kafka-1
# - kafka-2
# - kafka-3
# - schema-registry
# - mysql
# ports:
# - 8083:8083
# environment:
# CONNECT_LOG4J_APPENDER_STDOUT_LAYOUT_CONVERSIONPATTERN: "[%d] %p %X{connector.context}%m (%c:%L)%n"
# CONNECT_CUB_KAFKA_TIMEOUT: 300
# CONNECT_BOOTSTRAP_SERVERS: "kafka-1:39092,kafka-2:49092,kafka-3:59092"
# CONNECT_REST_ADVERTISED_HOST_NAME: 'kafka-connect-01'
# CONNECT_REST_PORT: 8083
# CONNECT_GROUP_ID: kafka-connect-group-01
# CONNECT_CONFIG_STORAGE_TOPIC: _kafka-connect-group-01-configs
# CONNECT_OFFSET_STORAGE_TOPIC: _kafka-connect-group-01-offsets
# CONNECT_STATUS_STORAGE_TOPIC: _kafka-connect-group-01-status
# CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
# CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
# CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
# CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
# CONNECT_INTERNAL_KEY_CONVERTER: 'org.apache.kafka.connect.json.JsonConverter'
# CONNECT_INTERNAL_VALUE_CONVERTER: 'org.apache.kafka.connect.json.JsonConverter'
# CONNECT_LOG4J_ROOT_LOGLEVEL: 'INFO'
# CONNECT_LOG4J_LOGGERS: 'org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR'
# CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: '1'
# CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: '1'
# CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: '1'
# CONNECT_PLUGIN_PATH: '/usr/share/java,/usr/share/confluent-hub-components/,/data/connect-jars'
# # External secrets config
# # See https://docs.confluent.io/current/connect/security.html#externalizing-secrets
# CONNECT_CONFIG_PROVIDERS: 'file'
# CONNECT_CONFIG_PROVIDERS_FILE_CLASS: 'org.apache.kafka.common.config.provider.FileConfigProvider'
# volumes:
# - ./data:/data
# - ${PWD}/.env:/data/credentials.properties
# command:
# # In the command section, $ are replaced with $$ to avoid the error 'Invalid interpolation format for "command" option'
# - bash
# - -c
# - |
# echo "Installing connector plugins"
# confluent-hub install --no-prompt confluentinc/kafka-connect-mqtt:1.2.3
# confluent-hub install --no-prompt debezium/debezium-connector-mysql:0.10.0
# #
# echo "Launching Kafka Connect worker"
# /etc/confluent/docker/run &
# #
# echo "Waiting for Kafka Connect to start listening on $$CONNECT_REST_ADVERTISED_HOST_NAME:$$CONNECT_REST_PORT ⏳"
# while : ; do
# curl_status=$$(curl -s -o /dev/null -w %{http_code} http://$$CONNECT_REST_ADVERTISED_HOST_NAME:$$CONNECT_REST_PORT/connectors)
# echo -e $$(date) " Kafka Connect listener HTTP state: " $$curl_status " (waiting for 200)"
# if [ $$curl_status -eq 200 ] ; then
# break
# fi
# sleep 5
# done
# #
# echo "Waiting for Schema Registry to start listening on schema-registry:8081 ⏳"
# while [ $$(curl -s -o /dev/null -w %{http_code} http://schema-registry:8081) -eq 000 ] ; do
# echo -e $$(date) " Schema Registry listener HTTP state: " $$(curl -s -o /dev/null -w %{http_code} http://schema-registry:8081) " (waiting for != 000)"
# sleep 5
# done
# #
# sleep infinity

  replicator:
    image: confluentinc/cp-enterprise-replicator:5.4.0
    container_name: replicator
    depends_on:
      - kafka-1
      - kafka-2
      - kafka-3
      - schema-registry
    ports:
      - 58083:58083
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka-1:39092,kafka-2:49092,kafka-3:59092'
      CONNECT_REST_ADVERTISED_HOST_NAME: 'replicator'
      CONNECT_REST_PORT: 58083
      CONNECT_GROUP_ID: compose-replicator
      CONNECT_CONFIG_STORAGE_TOPIC: _replicator-configs
      CONNECT_OFFSET_STORAGE_TOPIC: _replicator-offsets
      CONNECT_STATUS_STORAGE_TOPIC: _replicator-status
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
      CONNECT_INTERNAL_KEY_CONVERTER: 'org.apache.kafka.connect.json.JsonConverter'
      CONNECT_INTERNAL_VALUE_CONVERTER: 'org.apache.kafka.connect.json.JsonConverter'
      CONNECT_LOG4J_ROOT_LOGLEVEL: 'INFO'
      CONNECT_LOG4J_LOGGERS: 'org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR'
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: '1'
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: '1'
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: '1'
      CONNECT_PLUGIN_PATH: '/usr/share/java,/usr/share/confluent-hub-components/'
      CONNECT_CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY: 'All'


# elasticsearch:
# image: docker.elastic.co/elasticsearch/elasticsearch:7.5.0
# container_name: elasticsearch
# ports:
# - 9200:9200
# environment:
# xpack.security.enabled: "false"
# ES_JAVA_OPTS: "-Xms1g -Xmx1g"
# discovery.type: "single-node"
# # volumes:
# # - ./data/container_data/elasticsearch:/usr/share/elasticsearch/data
# command:
# - bash
# - -c
# - |
# /usr/local/bin/docker-entrypoint.sh &
# echo "Waiting for Elasticsearch to start ⏳"
# while [ $$(curl -s -o /dev/null -w %{http_code} http://localhost:9200/) -eq 000 ] ; do
# echo -e $$(date) " Elasticsearch listener HTTP state: " $$(curl -s -o /dev/null -w %{http_code} http://localhost:9200/) " (waiting for != 000)"
# sleep 5
# done

# curl -XPUT "http://localhost:9200/_template/kafkaconnect/" -H 'Content-Type: application/json' -d'
# {
# "index_patterns": "*",
# "settings": { "number_of_shards": 1, "number_of_replicas": 0 },
# "mappings": { "dynamic_templates": [
# { "dates": { "match": "*_TS", "mapping": { "type": "date" } } },
# { "heights": { "match": "HEIGHT", "mapping": { "type": "float" } } },
# { "locations": { "match": "LOCATION", "mapping": { "type": "geo_point" } } }
# ] } }'

# sleep infinity


# kibana:
# image: docker.elastic.co/kibana/kibana:7.5.0
# container_name: kibana
# depends_on:
# - elasticsearch
# ports:
# - 5601:5601
# environment:
# xpack.security.enabled: "false"
# discovery.type: "single-node"
# # command:
# # - bash
# # - -c
# # - |
# # /usr/local/bin/kibana-docker &
# # #
# # echo "Waiting for Kibana to start ⏳"
# # while : ; do
# # curl_status=$$(curl -s -o /dev/null -w %{http_code} http://localhost:5601/api/kibana/settings)
# # echo -e $$(date) " Kibana listener HTTP state: " $$curl_status " (waiting for 200)"
# # if [ $$curl_status -eq 200 ] ; then
# # break
# # fi
# # sleep 5
# # done
# # #
# # echo "Waiting for Kibana API to be available ⏳"
# # while : ; do
# # kibana_status=$$(curl -s 'http://localhost:5601/api/kibana/settings')
# # echo -e $$(date) " Kibana API response: " $$kibana_status
# # if [ $$kibana_status != "Kibana server is not ready yet" ] ; then
# # break
# # fi
# # sleep 5
# # done
# # #
# # sleep 60
# # echo -e "\n--\n+> Setup Kibana objects"

# # echo -e "\n--\n+> Opt out of Kibana telemetry"
# # curl 'http://localhost:5601/api/telemetry/v1/optIn' -H 'kbn-xsrf: nevergonnagiveyouup' -H 'content-type: application/json' -H 'accept: application/json' --data-binary '{"enabled":false}' --compressed

# # echo -e "\n--\n+> Create Kibana index patterns"
# # curl -XPOST 'http://localhost:5601/api/saved_objects/index-pattern/runner_location_idx' \
# # -H 'kbn-xsrf: nevergonnagiveyouup' \
# # -H 'Content-Type: application/json' \
# # -d '{"attributes":{"title":"runner_location","timeFieldName":"EVENT_TS"}}'

# # curl -XPOST 'http://localhost:5601/api/saved_objects/index-pattern/runner_status_idx' \
# # -H 'kbn-xsrf: nevergonnagiveyouup' \
# # -H 'Content-Type: application/json' \
# # -d '{"attributes":{"title":"runner_status","timeFieldName":"EVENT_TS"}}'

# # echo -e "\n--\n+> Set default Kibana index"
# # curl -XPOST 'http://localhost:5601/api/kibana/settings' \
# # -H 'kbn-xsrf: nevergonnagiveyouup' \
# # -H 'content-type: application/json' \
# # -d '{"changes":{"defaultIndex":"runner_status_idx"}}'

# # echo -e "\n--\n+> Import Kibana objects"

# # sleep infinity

# mysql:
# # *-----------------------------*
# # To connect to the DB:
# # docker-compose exec mysql bash -c 'mysql -u root -p$MYSQL_ROOT_PASSWORD'
# # *-----------------------------*
# image: mysql:8.0
# container_name: mysql
# ports:
# - 3306:3306
# environment:
# - MYSQL_ROOT_PASSWORD=debezium
# - MYSQL_USER=mysqluser
# - MYSQL_PASSWORD=mysqlpw
# volumes:
# - ./data/ddl/users.sql:/docker-entrypoint-initdb.d/z99_dump.sql

# # postgres:
# # # *-----------------------------*
# # # To connect to the DB:
# # # docker-compose exec postgres bash -c 'psql -U $POSTGRES_USER $POSTGRES_DB'
# # # *-----------------------------*
# # image: postgres:11
# # environment:
# # - POSTGRES_USER=postgres
# # - POSTGRES_PASSWORD=postgres
# # volumes:
# # - ./data/postgres:/docker-entrypoint-initdb.d/
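
Once the Replicator connector from create_replicator_source.sh is running against this stack, the replicated data can be sanity-checked from the host with kafkacat over the HOST listeners exposed above. A sketch; the topic name carries the epoch suffix from topic.rename.format, so list topics first and substitute the real name for <epoch>:

# List topics on the local cluster, then consume the replicated pcap topic
kafkacat -b localhost:9092 -L
kafkacat -b localhost:9092 -t "pcap-<epoch>" -C -K: -o beginning -e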