############################## jdk ###############################
wget https://mirrors.linyaohong.com/src/jdk-8u241-linux-x64.tar.gz
tar zxf jdk-8u241-linux-x64.tar.gz
mv jdk1.8.0_241/ /usr/local/

# Append the JDK environment variables to /etc/profile
cat >>/etc/profile <<'EOF'
export JAVA_HOME=/usr/local/jdk1.8.0_241
export PATH=$JAVA_HOME/bin:$PATH
EOF
source /etc/profile

cat > restart.sh <<'EOF'
...
EOF

cat >> /etc/supervisord.d/node.ini <<'EOF'
...
EOF

############################## zookeeper / kafka ###############################
# Write each node's id into zookeeper/myid (one value per node)
echo 1 > zookeeper/myid   # on node 1
echo 2 > zookeeper/myid   # on node 2
echo 3 > zookeeper/myid   # on node 3

# Start zookeeper
# /data/server/kafka_2.12-2.3.0/bin/zookeeper-server-start.sh -daemon /data/server/kafka_2.12-2.3.0/config/zookeeper.properties

# Manage zookeeper with supervisord
cat >> /etc/supervisord.d/zookeeper.ini <<'EOF'
[program:zookeeper]
command=/data/server/kafka_2.12-2.3.0/bin/zookeeper-server-start.sh /data/server/kafka_2.12-2.3.0/config/zookeeper.properties
autostart=true
autorestart=true
EOF

# Manage kafka with supervisord
cat >> /etc/supervisord.d/kafka.ini <<'EOF'
[program:kafka]
command=/data/server/kafka_2.12-2.3.0/bin/kafka-server-start.sh /data/server/kafka_2.12-2.3.0/config/server.properties
autostart=true
autorestart=true
EOF

# Append a test line to the watched log file
echo "test line $(date)" >> /tmp/test_log.log
############################## es end ###############################

############################## logstash ###############################
cd /data/server/logstash-7.3.0/config/
vim input-kafka.conf

# Start logstash:
nohup /data/server/logstash-7.3.0/bin/logstash -f /data/server/logstash-7.3.0/config/input-kafka.conf --config.reload.automatic &

cat > restart.sh <<'EOF'
...
EOF

cat >> /etc/supervisord.d/node.ini <<'EOF'
...
EOF

# input-kafka.conf
input {
    kafka {
        bootstrap_servers => "elk:9092,elk2:9092,elk3:9092"   # kafka broker addresses
        topics => ["vipthink","ccvipthink","jyvipthink"]
        codec => "json"          # must match the codec configured on the shipper's output
        consumer_threads => 1    # number of consumer threads
        decorate_events => true  # decorate each event with its own metadata: message size, source topic and consumer group
        type => "logstash_mixins"
    }
}
output {
    if [type] == "logstash_mixins" {
        elasticsearch {
            action => "index"    # the operation on ES
            hosts => ["elk:9200","elk2:9200","elk3:9200"]
            index => "%{[fields][type]}-%{+YYYY.MM.dd}"
            user => "elastic"
            password => "e9o4qkpVdWR7K2FYz2q"
        }
    }
}

############################## es queries ###############################
# Common queries
GET _template/stringDefault
GET _template/logstash
GET /_cat/nodes?v
GET /_cluster/health?pretty
GET _cat/aliases?v
GET _cat/templates
GET /_cluster/settings

# Kibana -> Advanced Settings
discover:sampleSize   # raise Kibana's default limit of 500 rows in Discover

# Check cluster information
GET /_cat/nodes?v

# Check cluster health
GET /_cluster/health?pretty
status                           # cluster status: green, yellow or red
number_of_nodes                  # number of nodes in the cluster
number_of_data_nodes             # number of data nodes
active_primary_shards            # active primary shards across the cluster
active_shards                    # all active shards across the cluster
relocating_shards                # shards being moved to other nodes; normally 0, grows when nodes join or leave
initializing_shards              # shards currently initializing
unassigned_shards                # unassigned shards; normally 0, grows when a node's replica shards are lost
number_of_pending_tasks          # tasks only the master node can process, such as creating indices and allocating shards; if this value never shrinks, the cluster is unstable
active_shards_percent_as_number  # shard health: active shards as a percentage of all shards

# View a template; the name after / is the template name (fill in your own)
GET _template/logstash

# Set a template
PUT _template/logstash
{
    "settings": {
        "index": {
            "number_of_shards": "2",
            "number_of_replicas": "1"
        }
    }
}

# Templates can also be edited from the template page in the web UI
# 01 edit via the web UI
# Index settings
{
    "index": {
        "number_of_shards": "3",
        "number_of_replicas": "5"
    }
}
# Mappings
{
    "dynamic_templates": [
        {
            "strings": {
                "mapping": {
                    "search_analyzer": "ik_smart",
                    "analyzer": "ik_max_word",
                    "type": "text"
                },
                "match_mapping_type": "string"
            }
        }
    ]
}

# 02 edit via the API
PUT _template/logstash
{
    "order": 0,
    "index_patterns": ["*"],
    "settings": {
        "index": {
            "number_of_replicas": "5",
            "number_of_shards": "3"
        }
    },
    "mappings": {
        "dynamic_templates": [
            {
                "strings": {
                    "mapping": {
                        "search_analyzer": "ik_smart",
                        "analyzer": "ik_max_word",
                        "type": "text"
                    },
                    "match_mapping_type": "string"
                }
            }
        ]
    }
}

# Set only the template mappings: map string fields to text
# (note: _default_ mappings were removed in ES 7; on a 7.x cluster put dynamic_templates directly under "mappings")
PUT _template/stringDefault
{
    "order": 10,
    "index_patterns": ["*"],
    "mappings": {
        "_default_": {
            "dynamic_templates": [
                {
                    "strings": {
                        "match_mapping_type": "string",
                        "mapping": {
                            "type": "text",
                            "analyzer": "ik_max_word",
                            "search_analyzer": "ik_smart"
                        }
                    }
                }
            ]
        }
    }
}

# Set these to fit your environment:
# number_of_replicas is the number of replica copies; with a single machine, set it to 0
# number_of_shards is the number of primary shards; the default is 5, and 3 is also common

# text type
# Used to index long text, such as an email body or a product description. The text is analyzed:
# before the document is indexed, an analyzer splits it into terms. After analysis, ES can match
# searches against the individual terms, but text fields cannot be used for filtering, sorting or aggregations.
# keyword type
# Suits values like email addresses, hostnames, status codes, postal codes and tags. The value is not
# analyzed, so keyword fields are routinely used for filtering, sorting and aggregations.
# In short, text values are analyzed and indexed as terms by default, while keyword values are stored
# without analysis, which makes them cheaper to hold in memory.
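To make the text-vs-keyword distinction concrete, the _analyze API shows what the IK analyzer from the templates above does to a string. A minimal sketch, assuming the IK plugin is installed and ES is reachable on elk:9200 with the elastic credentials used in the logstash output:

# Run a sample string through the ik_max_word analyzer configured in the templates above.
# Host and credentials reuse the examples from this document.
curl -s -u elastic:e9o4qkpVdWR7K2FYz2q -H 'Content-Type: application/json' \
  'http://elk:9200/_analyze?pretty' -d '{
    "analyzer": "ik_max_word",
    "text": "中华人民共和国国歌"
  }'

Each token in the response is individually searchable on a text field; a keyword field would keep the whole string as one unanalyzed term, which is why keyword can back filtering, sorting and aggregations.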
# transient settings take effect at runtime but are lost after a restart;
# persistent settings survive restarts

# Check cluster settings
GET /_cluster/settings

# Set the maximum number of shards per node
PUT _cluster/settings
{
    "persistent": {
        "cluster.max_shards_per_node": 5000
    }
}
PUT /_cluster/settings
{
    "transient": {
        "cluster": {
            "max_shards_per_node": 5000
        }
    }
}

# Set default primary and replica shard counts
PUT _template/logstash
{
    "index_patterns": ["*"],
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 2
    }
}

# Combined cluster settings
PUT /_cluster/settings
{
    "persistent": {
        "cluster": {
            "routing": {
                "allocation": {
                    "cluster_concurrent_rebalance": "10",
                    "node_concurrent_recoveries": "2",
                    "exclude": {
                        "_ip": ""
                    }
                }
            },
            "max_shards_per_node": "6000"
        },
        "indices": {
            "recovery": {
                "max_bytes_per_sec": "40mb"
            }
        }
    },
    "transient": {
        "cluster": {
            "routing": {
                "allocation": {
                    "node_concurrent_recoveries": "2",
                    "exclude": {
                        "_ip": ""
                    }
                }
            },
            "max_shards_per_node": "6000"
        },
        "indices": {
            "recovery": {
                "max_bytes_per_sec": "40mb"
            }
        }
    }
}

############################## nginx json access log ###############################
# The format name in access_log must match a log_format defined below
access_log /var/log/nginx/access.log access_exp;

log_format json escape=json '{ "@timestamp": "$time_iso8601", '
    '"time": "$ ", '
    '"remote_addr": "$remote_addr", '
    '"remote_user": "$remote_user", '
    '"body_bytes_sent": $body_bytes_sent, '
    '"request_time": $request_time, '
    '"status": $status, '
    '"host": "$host", '
    '"request": "$request", '
    '"request_method": "$request_method", '
    '"uri": "$uri", '
    '"http_referrer": "$http_referer", '
    '"http_x_forwarded_for": "$http_x_forwarded_for", '
    '"http_user_agent": "$http_user_agent", '
    '"up_addr": "$upstream_addr", '
    '"up_status": "$upstream_status", '
    '"up_rept": "$upstream_response_time" '
    '}';

# Single-line variant
log_format access_exp '{"nginx_timestamp":"$time_iso8601","clientip":"$remote_addr","nginx_host":"$server_addr","host":"$http_host","request":"$request","url":"$request_uri","upstreamhost":"$upstream_addr","status":"$status","body_bytes_sent":"$body_bytes_sent","request_time":"$request_time","upstream_response_time":"$upstream_response_time","http_x_forwarded_for":"$http_x_forwarded_for","referer":"$http_referer","http_user_agent":"$http_user_agent","request_length":"$request_length","request_method":"$request_method"}';

############################## systemd units ###############################
# /etc/systemd/system/kafka.service
[Unit]
Description=Apache Kafka server (broker)
Documentation=http://kafka.apache.org/documentation.html
Requires=network.target remote-fs.target
After=network.target remote-fs.target zookeeper.service

[Service]
Type=simple
User=root
Group=root
Environment="JAVA_HOME=/usr/local/jdk1.8.0_241"
ExecStart=/data/server/kafka_2.12-2.3.0/bin/kafka-server-start.sh /data/server/kafka_2.12-2.3.0/config/server.properties
ExecStop=/data/server/kafka_2.12-2.3.0/bin/kafka-server-stop.sh
Restart=on-abnormal

[Install]
WantedBy=multi-user.target

# /etc/systemd/system/zookeeper.service
[Unit]
Description=Apache Kafka Zookeeper server
Documentation=http://zookeeper.apache.org
Requires=network.target remote-fs.target
After=network.target remote-fs.target

[Service]
Type=simple
User=root
Group=root
Environment="JAVA_HOME=/usr/local/jdk1.8.0_241"
ExecStart=/data/server/kafka_2.12-2.3.0/bin/zookeeper-server-start.sh /data/server/kafka_2.12-2.3.0/config/zookeeper.properties
ExecStop=/data/server/kafka_2.12-2.3.0/bin/zookeeper-server-stop.sh
Restart=on-abnormal

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl stop kafka
systemctl restart zookeeper
systemctl restart kafka
systemctl status zookeeper
systemctl status kafka
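With the units installed, one way to smoke-test the whole pipeline is to push a message into one of the topics consumed above and look for it in ES. A sketch only: the JSON body and its "fields.type" value are illustrative, the hosts, topic and credentials follow the examples in this document, and the document only lands if zookeeper, the brokers and logstash are actually running.

# Publish one JSON test message into the vipthink topic.
# [fields][type] feeds the "%{[fields][type]}-%{+YYYY.MM.dd}" index name in the logstash output.
echo '{"fields":{"type":"vipthink"},"message":"pipeline smoke test"}' | \
  /data/server/kafka_2.12-2.3.0/bin/kafka-console-producer.sh \
    --broker-list elk:9092 --topic vipthink

# Give logstash a few seconds, then check that today's index exists and holds the document.
sleep 10
curl -s -u elastic:e9o4qkpVdWR7K2FYz2q 'http://elk:9200/_cat/indices/vipthink-*?v'
curl -s -u elastic:e9o4qkpVdWR7K2FYz2q 'http://elk:9200/vipthink-*/_search?q=message:smoke&pretty'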