Documentation fonctionnelle mais non terminée
Elastic Stack : https://www.elastic.co/fr/blog/elastic-stack-5-0-0-released
Retour d'expérience :
Retour d'installation : http://www.alasta.com/bigdata/2016/05/05/elasticstack-alpha-decouverte.html (2016) - http://magieweb.org/2017/04/tutoriel-mise-en-place-dun-serveur-de-monitoring-avec-elastic-stack-elk/ (2017) - http://blog.kinokocorp.com/?p=191 (2017 - Centos7 )
yum install java-1.8.0-openjdk
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch mkdir -p /local/rpm cd /local/rpm
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.5.2.rpm rpm --install elasticsearch-5.5.2.rpm
cluster.name: cluster-test node.name: ${HOSTNAME} bootstrap.memory_lock: true path.data: /local/elasticsearch/data path.logs: /local/elasticsearch/logs network.host: localhost http.port: 9200 #memory_lock = Désactiver le swap pour Elasticsearch : ( pour la gestion java des gros traitement)
#Décommenter la ligne suivante : LimitMEMLOCK=infinity #Supprimer l'option --quiet du paramètre ExecStart pour voir les évènements elasticsearch dans journalctl : --quiet
#Décommenter la ligne suivante : MAX_LOCKED_MEMORY=unlimited
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch
netstat -ltpn tcp6 0 0 127.0.0.1:9200 :::* LISTEN 2344/java curl -XGET 'localhost:9200/_nodes?filter_path=**.mlockall&pretty' nodes :{......} curl -XGET 'localhost:9200/?pretty' { "name" : "8Y5O47R", "cluster_name" : "elasticsearch", "cluster_uuid" : "2tt8eL_2TKuUsHVzflH6xQ", "version" : { "number" : "5.5.2", "build_hash" : "b2f0c09", "build_date" : "2017-08-14T12:33:14.154Z", "build_snapshot" : false, "lucene_version" : "6.6.0" }, "tagline" : "You Know, for Search" }
mkdir /local/kafka cd /local/kafka wget http://apache.crihan.fr/dist/kafka/0.11.0.0/kafka_2.12-0.11.0.0.tgz tar -xvf kafka_2.12-0.11.0.0.tgz cd kafka_2.12-0.11.0.0 groupadd kafka useradd kafka -d "/local/kafka/" -s "/bin/sh" -g "kafka" -M
# Conversion au format pkcs12 openssl pkcs12 -export -in /etc/pki/certs/cert.crt -inkey /etc/pki/certs/cert.key -chain -CAfile /etc/pki/certs/certCA.crt -name "elasticstack" -out elasticstack.p12 # import dans le keystore keytool -importkeystore -deststorepass hhjjkk -destkeystore server.keystore.jks -srckeystore elasticstack.p12 -srcstoretype PKCS12 # Lister le keystore: keytool -list -keystore server.keystore.jks # Autorité de certification : keytool -keystore server.truststore.jks -alias CARoot -import -file /etc/pki/certs/certCA.crt
# Ecoute du port + fix problème fqdn/certificat listeners=PLAINTEXT://:9092,SSL://:9093 advertised.host.name=kafka1.domaine.fr advertised.listeners=PLAINTEXT://kafka1.domaine.fr:9092,SSL://kafka1.domaine.fr:9093 # Réplication sur les deux nœuds offsets.topic.replication.factor=2 transaction.state.log.replication.factor=2 transaction.state.log.min.isr=2 default.replication.factor=2 # SSL ssl.keystore.location=/local/kafka/kafka_2.12-0.11.0.0/server.keystore.jks ssl.keystore.password=hhjjkk ssl.key.password=hhjjkk ssl.truststore.location=/local/kafka/kafka_2.12-0.11.0.0/server.truststore.jks ssl.truststore.password=hhjjkk
dataDir=/tmp/zookeeper clientPort=2181 tickTime=2000 initLimit=10 syncLimit=5 server.1=kafka1.domaine.fr:2888:3888 server.2=kafka2.domaine.fr:2888:3888
[Unit] Description=Apache Zookeeper server (Kafka) Documentation=http://zookeeper.apache.org Requires=network.target remote-fs.target After=network.target remote-fs.target [Service] Type=simple User=kafka Group=kafka Environment=JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk ExecStart=/local/kafka/kafka_2.12-0.11.0.0/bin/zookeeper-server-start.sh /local/kafka/kafka_2.12-0.11.0.0/config/zookeeper.properties ExecStop=/local/kafka/kafka_2.12-0.11.0.0/bin/zookeeper-server-stop.sh [Install] WantedBy=multi-user.target
[Unit] Description=Apache Kafka server (broker) Documentation=http://kafka.apache.org/documentation.html Requires=network.target remote-fs.target After=network.target remote-fs.target kafka-zookeeper.service [Service] Type=simple User=kafka Group=kafka Environment=JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk ExecStart=/local/kafka/kafka_2.12-0.11.0.0/bin/kafka-server-start.sh /local/kafka/kafka_2.12-0.11.0.0/config/server.properties ExecStop=/local/kafka/kafka_2.12-0.11.0.0/bin/kafka-server-stop.sh [Install] WantedBy=multi-user.target
systemctl daemon-reload systemctl start kafka-zookeeper.service systemctl start kafka.service
git clone https://github.com/yahoo/kafka-manager.git cd kafka-manager/ ./sbt clean dist cd target/universal/ unzip kafka-manager-1.3.3.13.zip cd kafka-manager-1.3.3.13 ZK_HOSTS=localhost:2181 ./bin/kafka-manager
---
wget https://artifacts.elastic.co/downloads/logstash/logstash-5.5.2.rpm rpm -ivh logstash-5.5.2.rpm
erreur : [2017-09-04T15:44:06,011][ERROR][logstash.inputs.beats ] Looks like you either have an invalid key or your private key was not in PKCS8 format. {:exception=>java.lang.IllegalArgumentException: File does not contain valid private key: /etc/pki/certs/cert.key} solution : 15:45:45 root@elasticstack:/local/rpm# openssl pkcs8 -topk8 -inform PEM -outform PEM -in /etc/pki/certs/cert.key -out /etc/pki/certs/cert.pem -nocrypt
cd /etc/logstash/conf.d/
input { kafka { bootstrap_servers => 'kafka1.domaine.fr:9092,kafka2.domaine.fr:9092' topics => ["WEB-TEST_APACHE"] auto_offset_reset => "earliest" # pour que logstash récupère les logs manquants codec => json {} } } filter { grok { match => { "message" => ["%{IPORHOST:[apache2][access][remote_ip]} - %{DATA:[apache2][access][user_name]} \[%{HTTPDATE:[apache2][access][time]}\] \"%{WORD:[apache2][access][method]} %{DATA:[apache2][access][url]} HTTP/%{NUMBER:[apache2][access][http_version]}\" %{NUMBER:[apache2][access][response_code]} %{NUMBER:[apache2][access][body_sent][bytes]}( \"%{DATA:[apache2][access][referrer]}\")?( \"%{DATA:[apache2][access][agent]}\")?", "%{IPORHOST:[apache2][access][remote_ip]} - %{DATA:[apache2][access][user_name]} \\[%{HTTPDATE:[apache2][access][time]}\\] \"-\" %{NUMBER:[apache2][access][response_code]} -" ] } remove_field => "message" } mutate { add_field => { "read_timestamp" => "%{@timestamp}" } } date { match => [ "[apache2][access][time]", "dd/MMM/YYYY:H:m:s Z" ] remove_field => "[apache2][access][time]" } useragent { source => "[apache2][access][agent]" target => "[apache2][access][user_agent]" remove_field => "[apache2][access][agent]" } geoip { source => "[apache2][access][remote_ip]" target => "[apache2][access][geoip]" } } output { elasticsearch { index => "webtest-logs-%{+YYYY.MM.dd}" hosts => ["localhost:9200"] sniffing => false } stdout { codec => rubydebug } }
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/web-test.conf -t
systemctl enable logstash
systemctl start logstash
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.5.2-x86_64.rpm rpm -vi filebeat-5.5.2-x86_64.rpm
filebeat.prospectors: - input_type: log paths: - /var/log/httpd/*log document_type: apache - input_type: log paths: - /var/log/*.log .............
output.logstash: # The Logstash hosts hosts: ["elasticstack.domaine.fr:5443"] # Optional SSL. By default is off. # List of root certificates for HTTPS server verifications ssl.certificate_authorities: ["/etc/pki/certs/certCA.crt"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" template.name: "filebeat" template.path: "filebeat.template.json" template.overwrite: false
output.kafka: # initial brokers for reading cluster metadata #hosts: ["kafka1.domaine.fr:9092","kafka2.domaine.fr:9092"] hosts: ["kafka1.domaine.fr:9093","kafka2.domaine.fr:9093"] # message topic selection + partitioning topic: WEB-TEST_APACHE #topic: '%{[type]}' partition.round_robin: reachable_only: false required_acks: 1 compression: gzip max_message_bytes: 1000000 ssl.certificate_authorities: ["/etc/pki/certs/certCA.crt"] ssl.certificate: "/etc/pki/certs/cert.crt" ssl.key: "/etc/pki/certs/cert.key"
systemctl enable filebeat
systemctl start filebeat
wget https://artifacts.elastic.co/downloads/kibana/kibana-5.5.2-x86_64.rpm rpm -ivh kibana-5.5.2-x86_64.rpm
server.port: 5601 server.host: "localhost" elasticsearch.url: "http://localhost:9200"
systemctl enable kibana
systemctl start kibana
yum install httpd vim /etc/httpd/conf.d/kibana.conf <Location "/"> ProxyPass "http://localhost:5601/" ProxyPassReverse "http://localhost:5601/" # Ajouter authentification de votre choix (htpasswd, ldap, ... ) </Location>
cd /local/ git clone https://github.com/royrusso/elasticsearch-HQ.git
..... http.cors.allow-origin: "*" #Mettre les IP autorisées à faire l'administration http.cors.enabled: true
<Location "/elasticsearch-HQ"> ProxyPass "!" </Location> <Directory "/var/www/html/elasticsearch-HQ"> Options Indexes FollowSymLinks AllowOverride None Require all granted </Directory>
Attention ! Consommation CPU élevée lorsque le volume de requêtes est important
yum install libpcap wget https://artifacts.elastic.co/downloads/beats/packetbeat/packetbeat-5.6.0-x86_64.rpm rpm -vi packetbeat-5.6.0-x86_64.rpm
/usr/share/packetbeat/scripts/import_dashboards -es http://elasticsearch.domaine.fr:9200
#################
grok debugger http://grokdebug.herokuapp.com/