<?xml version="1.0"?>
<configuration>
<!-- HA-related configuration -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>zzrm</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>nx11-15-6.58os.org</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>nx11-15-156.58os.org</value>
</property>
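<!-- Note: with yarn.resourcemanager.hostname.<rm-id> set, YARN can derive default ports for
the per-RM addresses; the explicit address blocks below simply pin each RPC/web port. -->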
<!-- rm1 addresses -->
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
<value>nx11-15-6.58os.org:8031</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm1</name>
<value>nx11-15-6.58os.org:8030</value>
</property>
<property>
<name>yarn.resourcemanager.address.rm1</name>
<value>nx11-15-6.58os.org:8032</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm1</name>
<value>nx11-15-6.58os.org:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>nx11-15-6.58os.org:8088</value>
</property>
<!-- rm2 addresses -->
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
<value>nx11-15-156.58os.org:8031</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm2</name>
<value>nx11-15-156.58os.org:8030</value>
</property>
<property>
<name>yarn.resourcemanager.address.rm2</name>
<value>nx11-15-156.58os.org:8032</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm2</name>
<value>nx11-15-156.58os.org:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>nx11-15-156.58os.org:8088</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-state-store.parent-path</name>
<value>/rmstore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>nx11-15-30.58os.org:2181,nx11-15-31.58os.org:2181,nx11-15-32.58os.org:2181/zzhadoop</value>
</property>
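<!-- Note: the ZooKeeper connection string above ends with the /zzhadoop chroot, so combined
with the parent path the RM state actually lives under /zzhadoop/rmstore in ZooKeeper. -->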
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>true</value>
</property>
<property>
<name>yarn.client.failover-proxy-provider</name>
<value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
</property>
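<!-- Clients configured with the same rm-ids try each ResourceManager in turn via
ConfiguredRMFailoverProxyProvider. The active RM can be checked with, for example:
yarn rmadmin -getServiceState rm1 -->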
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<description>Classpath for typical applications.</description>
<name>yarn.application.classpath</name>
<value>
$HADOOP_HOME/etc/hadoop/,
$HADOOP_HOME/share/hadoop/common/*,$HADOOP_HOME/share/hadoop/common/lib/*,
$HADOOP_HOME/share/hadoop/hdfs/*,$HADOOP_HOME/share/hadoop/hdfs/lib/*,
$HADOOP_HOME/share/hadoop/mapreduce/*,$HADOOP_HOME/share/hadoop/mapreduce/lib/*,
$HADOOP_HOME/share/hadoop/yarn/*,$HADOOP_HOME/share/hadoop/yarn/lib/*,
$HADOOP_HOME/share/hadoop/tools/lib/*
</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle,spark_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
<value>org.apache.spark.network.yarn.YarnShuffleService</value>
</property>
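<!-- The class property key must match the service name declared in yarn.nodemanager.aux-services
(mapreduce_shuffle, spark_shuffle). The spark_shuffle service is implemented by Spark, not
Hadoop: the matching spark-<version>-yarn-shuffle.jar must be on every NodeManager's classpath
(commonly copied into $HADOOP_HOME/share/hadoop/yarn/lib/), or the NodeManager will not start. -->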
<!-- Configure multiple disk directories, separated by commas -->
<property>
<name>yarn.nodemanager.local-dirs</name>
<value>file:///data0/yarn/nm-local-dir,file:///data1/yarn/nm-local-dir,file:///data2/yarn/nm-local-dir,file:///data3/yarn/nm-local-dir,file:///data4/yarn/nm-local-dir,file:///data5/yarn/nm-local-dir,file:///data6/yarn/nm-local-dir,file:///data7/yarn/nm-local-dir,file:///data8/yarn/nm-local-dir,file:///data9/yarn/nm-local-dir,file:///data10/yarn/nm-local-dir,file:///data11/yarn/nm-local-dir</value>
<description>
An application's localized file directory will be found in: ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
</description>
</property>
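<!-- Spreading local dirs across data0..data11 spreads container localization and shuffle I/O
over all twelve disks; the NodeManager's disk health checker takes failed directories out of
rotation automatically. -->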
<!-- Configure multiple disk directories, separated by commas -->
<property>
<name>yarn.nodemanager.log-dirs</name>
<value>file:///data0/yarn/userlogs,file:///data1/yarn/userlogs,file:///data2/yarn/userlogs,file:///data3/yarn/userlogs,file:///data4/yarn/userlogs,file:///data5/yarn/userlogs,file:///data6/yarn/userlogs,file:///data7/yarn/userlogs,file:///data8/yarn/userlogs,file:///data9/yarn/userlogs,file:///data10/yarn/userlogs,file:///data11/yarn/userlogs</value>
<description>
Where to store container logs. Each container directory will contain the files stderr, stdout, and syslog generated by that container.
</description>
</property>
<property>
<name>yarn.nodemanager.log.retain-seconds</name>
<value>10800</value>
<!-- 10800 seconds = 3 hours -->
</property>
<!-- Path on HDFS -->
<property>
<description>Where to aggregate logs</description>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/yarn/apps/logs</value>
</property>
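<!-- With aggregation enabled, a finished application's logs are uploaded to HDFS, typically
under /yarn/apps/logs/<user>/logs/<application id> (default suffix "logs"), and can be fetched
with: yarn logs -applicationId <application id> -->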
<!-- Path on HDFS -->
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/yarn/staging</value>
</property>
<!-- Aggregated-log server (MapReduce JobHistory) used for completed-application log links, e.g. by the Spark history server -->
<property>
<name>yarn.log.server.url</name>
<value>http://nx11-15-30.58os.org:19888/jobhistory/logs</value>
</property>
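<!-- Once logs have been aggregated and the local copies deleted, NodeManager and
ResourceManager web UI log links redirect to this history server URL. -->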
<property>
<name>yarn.resourcemanager.am.max-retries</name>
<value>3</value>
</property>
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>3</value>
</property>
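<!-- yarn.resourcemanager.am.max-retries is the older, deprecated name for
yarn.resourcemanager.am.max-attempts; setting both keeps the limit of 3 ApplicationMaster
attempts consistent across Hadoop versions. -->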
<property>
<description>How long to wait until a node manager is considered dead.</description>
<name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
<value>120000</value>
</property>
<property>
<name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
<value>5000</value>
</property>
<property>
<name>yarn.am.liveness-monitor.expiry-interval-ms</name>
<value>120000</value>
</property>
<property>
<name>yarn.resourcemanager.rm.container-allocation.expiry-interval-ms</name>
<value>120000</value>
</property>
<property>
<description>Amount of physical memory, in MB, that can be allocated for containers.</description>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>102400</value>
</property>
<property>
<description>Number of CPU cores that can be allocated for containers.</description>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>32</value>
</property>
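<!-- Each NodeManager therefore advertises 100 GB (102400 MB) and 32 vcores to the scheduler;
for example, at 4 GB per container that allows about 25 concurrent containers per node, so
memory rather than CPU is the binding resource. -->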
<!-- Fair Scheduler configuration -->
<property>
<name>yarn.resourcemanager.scheduler.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
<name>yarn.scheduler.fair.allocation.file</name>
<value>/opt/soft/zdp/hadoop/etc/hadoop/fair-scheduler.xml</value>
</property>
<property>
<name>yarn.scheduler.fair.user-as-default-queue</name>
<value>false</value>
</property>
<property>
<name>yarn.scheduler.fair.preemption</name>
<value>false</value>
</property>
<property>
<name>yarn.scheduler.fair.sizebasedweight</name>
<value>false</value>
</property>
<property>
<name>yarn.scheduler.fair.assignmultiple</name>
<value>true</value>
</property>
<property>
<name>yarn.scheduler.fair.max.assign</name>
<value>3</value>
</property>
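<!-- assignmultiple=true with max.assign=3 lets the Fair Scheduler hand out up to three
containers per NodeManager heartbeat, which speeds up ramp-up for large jobs. -->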
<property>
<name>yarn.scheduler.fair.allow-undeclared-pools</name>
<value>false</value>
</property>
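<!-- With user-as-default-queue=false and undeclared pools disallowed, applications that do not
name a queue declared in fair-scheduler.xml fall back to the default queue instead of spawning
per-user or ad-hoc queues. -->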
<property>
<name>yarn.scheduler.fair.continuous-scheduling-enabled</name>
<value>true</value> <!-- assumed value; the original file is truncated at this property -->
</property>
</configuration>