hdfs-site
dfs.namenode.audit.log.async=true
# DataNode block reports and heartbeats (if no separate lifeline), also ZKFC periodic health checks. Not for client applications.
#dfs.namenode.servicerpc-address=$NN_HOSTNAME:8040
# or if HA,
dfs.namenode.servicerpc-address.$NN_SERVICENAME.nn1=$NN1_HOSTNAME:8040
dfs.namenode.servicerpc-address.$NN_SERVICENAME.nn2=$NN2_HOSTNAME:8040
# Above change requires stopping ZKFC and "sudo -u hdfs hdfs zkfc -formatZK"
# 20 * log2(Cluster Size) but lower than 200. NOTE: log2(1000) = 9.965...
dfs.namenode.handler.count=200
# default is 10
dfs.namenode.service.handler.count=40
dfs.namenode.lifeline.rpc-address.$NN_SERVICENAME.nn1=$NN1_HOSTNAME:8050
dfs.namenode.lifeline.rpc-address.$NN_SERVICENAME.nn2=$NN2_HOSTNAME:8050
# RPC Congestion Control only for NameNode (now client) port 8020 https://issues.apache.org/jira/browse/HADOOP-10597 hadoop 2.8.0
ipc.8020.backoff.enable=true
ipc.8020.callqueue.impl=org.apache.hadoop.ipc.FairCallQueue
# (optional) Enable RPC Caller Context to track the "bad" jobs https://issues.apache.org/jira/browse/HDFS-9184
hadoop.caller.context.enabled=true
# NOTE: line is incomplete — this property needs a value in seconds (e.g. 3600 to delay block deletions for an hour after startup)
dfs.namenode.startup.delay.block.deletion.sec
hadoop-env
# NameNode JVM options: enable JMX, CMS GC with 8 parallel threads triggered at
# 70% old-gen occupancy, fixed 1G heap and 128M young gen, GC logging with a
# per-start timestamp, and crash/OOM artifacts. Existing $HADOOP_NAMENODE_OPTS
# is appended so prior settings are preserved.
# NOTE(review): -XX:PermSize/-XX:MaxPermSize are ignored (with a warning) on
# JDK 8+ — confirm target JDK; paths below are machine-specific and presumably
# need adjusting per deployment.
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -Xms1G -Xmx1G -XX:NewSize=128M -XX:MaxNewSize=128M -XX:PermSize=128M -XX:MaxPermSize=256M -verbose:gc -Xloggc:/Users/chris/hadoop-deploy-trunk/hadoop-3.0.0-SNAPSHOT/logs/gc.log-$(date +'%Y%m%d%H%M') -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:ErrorFile=/Users/chris/hadoop-deploy-trunk/hadoop-3.0.0-SNAPSHOT/logs/hs_err_pid%p.log -XX:+HeapDumpOnOutOfMemoryError $HADOOP_NAMENODE_OPTS"
0 件のコメント:
コメントを投稿