<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--><configuration>
<!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
<!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
<!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
<!-- Hive Execution Parameters -->
<property>
<name>hive.exec.script.wrapper</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.plan</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.stagingdir</name>
<value>.hive-staging</value>
<description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/opt/data/hive/scratch</value>
<description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
</property>
<property>
<name>hive.repl.rootdir</name>
<value>/opt/data/hive/repl/</value>
<description>HDFS root dir for all replication dumps.</description>
</property>
<property>
<name>hive.repl.cm.enabled</name>
<value>false</value>
<description>Turn on ChangeManager, so delete files will go to cmrootdir.</description>
</property>
<property>
<name>hive.repl.cmrootdir</name>
<value>/opt/data/hive/cmroot/</value>
<description>Root dir for ChangeManager, used for deleted files.</description>
</property>
<property>
<name>hive.repl.cm.retain</name>
<value>24h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is hour if not specified.
Time to retain removed files in cmrootdir.
</description>
</property>
<property>
<name>hive.repl.cm.interval</name>
<value>3600s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Interval for cmroot cleanup thread.
</description>
</property>
<property>
<name>hive.repl.replica.functions.root.dir</name>
<value>/opt/data/hive/repl/functions/</value>
<description>Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse</description>
</property>
<property>
<name>hive.repl.approx.max.load.tasks</name>
<value>10000</value>
<description>
Provide an approximation of the maximum number of tasks that should be executed before
dynamically generating the next set of tasks. The number is approximate as Hive
will stop at a slightly higher number, the reason being some events might lead to a
task increment that would cross the specified limit.
</description>
</property>
<property>
<name>hive.repl.partitions.dump.parallelism</name>
<value>100</value>
<description>Number of threads that will be used to dump partition data information during repl dump.</description>
</property>
<property>
<name>hive.repl.dumpdir.clean.freq</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
Frequency at which timer task runs to purge expired dump dirs.
</description>
</property>
<property>
<name>hive.repl.dumpdir.ttl</name>
<value>7d</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is day if not specified.
TTL of dump dirs before cleanup.
</description>
</property>
<property>
<name>hive.repl.dump.metadata.only</name>
<value>false</value>
<description>Indicates whether replication dump only metadata information or data + metadata.</description>
</property>
<property>
<name>hive.repl.dump.include.acid.tables</name>
<value>false</value>
<description>
Indicates if repl dump should include information about ACID tables. It should be
used in conjunction with 'hive.repl.dump.metadata.only' to enable copying of
metadata for acid tables which do not require the corresponding transaction
semantics to be applied on target. This can be removed when ACID table
replication is supported.
</description>
</property>
<property>
<name>hive.repl.bootstrap.dump.open.txn.timeout</name>
<value>1h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is hour if not specified.
Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. If these open transactions are not closed within the timeout value, then REPL DUMP will forcefully abort those transactions and continue with bootstrap dump.
</description>
</property>
<property>
<name>hive.repl.add.raw.reserved.namespace</name>
<value>false</value>
<description>
For TDE with same encryption keys on source and target, allow Distcp super user to access
the raw bytes from filesystem without decrypting on source and then encrypting on target.
</description>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>/opt/data/hive/tmp/master</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/opt/data/hive/tmp/${hive.session.id}_resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.scratch.dir.permission</name>
<value>700</value>
<description>The permission for the user specific scratch directories that get created.</description>
</property>
<property>
<name>hive.exec.submitviachild</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.exec.submit.local.task.via.child</name>
<value>true</value>
<description>
Determines whether local tasks (typically mapjoin hashtable generation phase) runs in
separate JVM (true recommended) or not.
Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
</description>
</property>
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>
Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task).
This prevents runaway scripts from filling logs partitions to capacity
</description>
</property>
<property>
<name>hive.exec.script.allow.partial.consumption</name>
<value>false</value>
<description>When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input.</description>
</property>
</configuration>
没有合适的资源?快使用搜索试试~ 我知道了~
资源推荐
资源详情
资源评论
收起资源包目录
Docker(Hadoop-3.3.1+HBase-2.4.16+Zookeeper-3.7.1+Hive-3.1.3)配置文件 (202个子文件)
backup-masters 7B
backup-masters 7B
backup-masters 7B
container-executor.cfg 3KB
container-executor.cfg 3KB
container-executor.cfg 3KB
zoo.cfg 1KB
zoo.cfg 1KB
zoo.cfg 1KB
zoo_sample.cfg 1KB
zoo_sample.cfg 1KB
zoo_sample.cfg 1KB
hbase-env.cmd 4KB
hbase-env.cmd 4KB
hbase-env.cmd 4KB
hadoop-env.cmd 4KB
hadoop-env.cmd 4KB
hadoop-env.cmd 4KB
yarn-env.cmd 2KB
yarn-env.cmd 2KB
yarn-env.cmd 2KB
mapred-env.cmd 951B
mapred-env.cmd 951B
mapred-env.cmd 951B
my.cnf 2KB
hadoop-user-functions.sh.example 3KB
hadoop-user-functions.sh.example 3KB
hadoop-user-functions.sh.example 3KB
ssl-server.xml.example 3KB
ssl-server.xml.example 3KB
ssl-server.xml.example 3KB
ssl-client.xml.example 2KB
ssl-client.xml.example 2KB
ssl-client.xml.example 2KB
myid 2B
myid 2B
myid 2B
log4j.properties 13KB
log4j.properties 13KB
log4j.properties 13KB
llap-daemon-log4j2.properties 7KB
llap-daemon-log4j2.properties 7KB
llap-daemon-log4j2.properties 7KB
log4j.properties 6KB
log4j.properties 6KB
log4j.properties 6KB
llap-cli-log4j2.properties 3KB
llap-cli-log4j2.properties 3KB
llap-cli-log4j2.properties 3KB
log4j.properties 3KB
log4j.properties 3KB
log4j.properties 3KB
hadoop-metrics2.properties 3KB
hadoop-metrics2.properties 3KB
hadoop-metrics2.properties 3KB
hive-log4j2.properties 3KB
hive-log4j2.properties 3KB
hive-log4j2.properties 3KB
parquet-logging.properties 3KB
parquet-logging.properties 3KB
parquet-logging.properties 3KB
yarnservice-log4j.properties 3KB
yarnservice-log4j.properties 3KB
yarnservice-log4j.properties 3KB
hive-exec-log4j2.properties 2KB
hive-exec-log4j2.properties 2KB
hive-exec-log4j2.properties 2KB
kms-log4j.properties 2KB
kms-log4j.properties 2KB
kms-log4j.properties 2KB
hadoop-metrics2-hbase.properties 2KB
hadoop-metrics2-hbase.properties 2KB
hadoop-metrics2-hbase.properties 2KB
httpfs-log4j.properties 2KB
httpfs-log4j.properties 2KB
httpfs-log4j.properties 2KB
beeline-log4j2.properties 2KB
beeline-log4j2.properties 2KB
beeline-log4j2.properties 2KB
log4j-hbtop.properties 1KB
log4j-hbtop.properties 1KB
log4j-hbtop.properties 1KB
regionservers 23B
regionservers 23B
regionservers 23B
hadoop-env.sh 17KB
hadoop-env.sh 17KB
hadoop-env.sh 17KB
hbase-env.sh 8KB
hbase-env.sh 8KB
hbase-env.sh 8KB
yarn-env.sh 6KB
yarn-env.sh 6KB
yarn-env.sh 6KB
example.sh 4KB
example.sh 4KB
example.sh 4KB
hive-env.sh 3KB
hive-env.sh 3KB
hive-env.sh 3KB
共 202 条
- 1
- 2
- 3
资源评论
lim_5258
- 粉丝: 12
- 资源: 10
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- XILINXFPGA源码Xilinxspratan3xcs100E(VGAPS2)
- XILINXFPGA源码XilinxSPARTAN-3E入门开发板实例
- XILINXFPGA源码XilinxSdramVerilog和VHDL版本文档
- 物联网智能家居方案-基于Nucleo-STM32L073&机智云(大赛作品,文档齐全,可直接运行)(文档加Matlab源码)
- XILINXFPGA源码XilinxISE9.xFPGACPLD设计源码
- 成都市地图含高新区(高新南区,高新西区),天府新区,东部新区虚拟行政区划
- XILINXFPGA源码XilinxEDK设计试验
- XILINXFPGA源码XilinxEDKMicroBlaze内置USB固件程序
- 基于 django 的视频点播后台管理系统源代码+数据库
- 基于Java的网上医院预约挂号系统的设计与实现(部署视频)-kaic.mp4
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功