1. Run the JMX exporter as a Java agent in all four daemons (NameNode, DataNode, ResourceManager, NodeManager). For this I have added a -javaagent option to the per-daemon OPTS variables in hadoop-env.sh and yarn-env.sh:
[root@cloud01 hadoop]# cat yarn-env.sh |egrep -v '^$|#'
export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -javaagent:/home/ec2-user/jmx_exporter/jmx_prometheus_javaagent-0.3.1.jar=9104:/home/ec2-user/jmx_exporter/prometheus_config.yml"
export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -javaagent:/home/ec2-user/jmx_exporter/jmx_prometheus_javaagent-0.3.1.jar=9105:/home/ec2-user/jmx_exporter/prometheus_config.yml"
[root@do1cloud01 hadoop]# cat hadoop-env.sh |egrep -v '^$|#'
JAVA_HOME=/do1cloud/jdk1.8.0_151
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
case ${HADOOP_OS_TYPE} in
Darwin*)
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
;;
esac
export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -javaagent:/home/ec2-user/jmx_exporter/jmx_prometheus_javaagent-0.3.1.jar=9102:/home/ec2-user/jmx_exporter/prometheus_config.yml"
export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS -javaagent:/home/ec2-user/jmx_exporter/jmx_prometheus_javaagent-0.3.1.jar=9103:/home/ec2-user/jmx_exporter/prometheus_config.yml"
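After restarting the four daemons so the agents are picked up, each exporter endpoint can be checked from the shell. A quick sanity check (the IP below is just one of the nodes used in the scrape config in the next step; adjust to your own hosts):
curl -s http://10.0.0.99:9102/metrics | head    # NameNode exporter
curl -s http://10.0.0.99:9103/metrics | head    # DataNode exporter
curl -s http://10.0.0.99:9104/metrics | head    # ResourceManager exporter
curl -s http://10.0.0.99:9105/metrics | head    # NodeManager exporter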
2. Prometheus configuration
[root@do1cloud03 prometheus]# cat prometheus.yml|egrep -v '^$|#'
global:
rule_files:
  - "rules/cpu2mem.yml"
scrape_configs:
  - job_name: 'federate'
    scrape_interval: 10s
    honor_labels: true
    metrics_path: '/federate'
    params:
      'match[]':
        - '{job=~".+"}'
  #hadoop
  - job_name: 'YARN_RESOURCEMANAGER'
    static_configs:
      - targets: ['10.0.0.99:9104','10.0.0.65:9104','10.0.0.77:9104']
  - job_name: 'YARN_NODEMANAGER'
    static_configs:
      - targets: ['10.0.0.99:9105','10.0.0.65:9105','10.0.0.77:9105']
  - job_name: 'HADOOP_NAMENODE'
    static_configs:
      - targets: ['10.0.0.99:9102','10.0.0.65:9102','10.0.0.77:9102']
  - job_name: 'HADOOP_DATANODE'
    static_configs:
      - targets: ['10.0.0.99:9103','10.0.0.65:9103','10.0.0.77:9103']
  #hbase
  - job_name: 'hbase'
    static_configs:
      - targets: ['10.0.0.99:7000']
        labels:
          instance: 99_7000
      - targets: ['10.0.0.65:7000']
        labels:
          instance: 65_7000
      - targets: ['10.0.0.77:7000']
        labels:
          instance: 77_7000
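Before (re)starting Prometheus it is worth validating this file. A minimal sketch using promtool, which ships with Prometheus (the reload call assumes Prometheus was started with --web.enable-lifecycle):
./promtool check config prometheus.yml
curl -X POST http://localhost:9090/-/reload    # hot-reload instead of a full restart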
3. Configuration file for the JMX exporter jar
[root@do1cloud02 jmx_exporter]# cat /home/ec2-user/jmx_exporter/prometheus_config.yml
#rules:
#  - pattern: Hadoop<service=ResourceManager, name=MetricsSystem, sub=Stats><>NumAllSources
#    name: sources
#    labels:
#      app_id: "hadoop_rm"
---
rules:
  - pattern: '.*'
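The catch-all pattern '.*' exposes every MBean attribute under an auto-generated metric name. If you prefer friendlier names, rules like the commented-out one above can be re-enabled; a sketch based on that commented example (the metric name hadoop_rm_num_all_sources is my own choice, not from the original setup):
---
rules:
  # Example only: turn one ResourceManager MBean attribute into a named metric
  - pattern: 'Hadoop<service=ResourceManager, name=MetricsSystem, sub=Stats><>NumAllSources'
    name: hadoop_rm_num_all_sources
    labels:
      app_id: "hadoop_rm"
  # Keep the catch-all for everything else
  - pattern: '.*'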
--------------------------------------------------------------------------------------------------
4. HBase monitoring
[root@do1cloud01 conf]# cat hbase-env.sh |egrep -v '^$|#'
export JAVA_HOME=/do1cloud/jdk1.8.0_151
export HBASE_CLASSPATH=/do1cloud/hadoop-3.0.3/etc/hadoop
export HBASE_MANAGES_ZK=false
export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"
HBASE_OPTS="$HBASE_OPTS -javaagent:/path/jmx_prometheus_javaagent-0.9.jar=7000:/path/hbase_jmx_config.yaml"
# Comment the javaagent line out again after HBase has started, before running hbase shell (the shell also sources hbase-env.sh, so the agent would try to bind port 7000 a second time).
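Once the HMaster/RegionServer processes are up with the agent loaded, the HBase exporter can be checked the same way (port 7000 as configured above, host taken from the scrape config):
curl -s http://10.0.0.99:7000/metrics | head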
5. YAML file contents
/path/hbase_jmx_config.yaml
---
rules:
  - pattern: '.*'
Reference: https://blog.godatadriven.com/hbase-prometheus-monitoring