<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--><configuration>
<!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
<!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
<!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
<!-- Hive Execution Parameters -->
<property>
<name>hive.exec.script.wrapper</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.plan</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.stagingdir</name>
<value>.hive-staging</value>
<description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results, with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/tmp/hive</value>
<description>HDFS root scratch dir for Hive jobs, which gets created with write-all (733) permission. For each connecting user, an HDFS scratch dir ${hive.exec.scratchdir}/&lt;username&gt; is created with ${hive.scratch.dir.permission}.</description>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>${system:java.io.tmpdir}/${system:user.name}</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>${system:java.io.tmpdir}/${hive.session.id}_resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
<name>hive.scratch.dir.permission</name>
<value>700</value>
<description>The permission for the user specific scratch directories that get created.</description>
</property>
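<!--
Illustrative example (not part of the generated defaults): with the settings above, a
connecting user named "alice" (hypothetical) gets the HDFS scratch directory
/tmp/hive/alice, created with permission 700. To relocate the root, override it in
hive-site.xml:
<property>
  <name>hive.exec.scratchdir</name>
  <value>/tmp/mycluster/hive</value>
</property>
-->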
<property>
<name>hive.exec.submitviachild</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.exec.submit.local.task.via.child</name>
<value>true</value>
<description>
Determines whether local tasks (typically the mapjoin hashtable generation phase) run in
a separate JVM (true, recommended) or not.
Running them in the same JVM avoids the overhead of spawning a new JVM but can lead to out-of-memory issues.
</description>
</property>
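<!--
Illustrative session-level override (a sketch, not a recommendation): if spawning a
child JVM per local task is too costly and the hashtables are known to be small, the
local task can be kept in the parent process:
SET hive.exec.submit.local.task.via.child=false;
-->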
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>
Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task).
This prevents runaway scripts from filling log partitions to capacity.
</description>
</property>
<property>
<name>hive.exec.script.allow.partial.consumption</name>
<value>false</value>
<description>
When enabled, this option allows a user script to exit successfully without consuming
all the data from the standard input.
</description>
</property>
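<!--
Illustrative example (the table name "src" and the script are hypothetical): a TRANSFORM
script such as
SELECT TRANSFORM(line) USING 'head -n 10' AS (line) FROM src;
exits after ten lines without draining its input. With the default of false, Hive treats
the resulting broken pipe as a failure, while
SET hive.exec.script.allow.partial.consumption=true;
lets the query succeed.
-->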
<property>
<name>stream.stderr.reporter.prefix</name>
<value>reporter:</value>
<description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
</property>
<property>
<name>stream.stderr.reporter.enabled</name>
<value>true</value>
<description>Enable consumption of status and counter messages for streaming jobs.</description>
</property>
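<!--
Illustrative example (group and counter names are hypothetical): with the two settings
above, a TRANSFORM/streaming script can update counters or report status by writing
prefixed lines to standard error, e.g. from a shell script:
echo "reporter:counter:MyGroup,RowsSeen,1" >&2
echo "reporter:status:still processing" >&2
-->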
<property>
<name>hive.exec.compress.output</name>
<value>false</value>
<description>
This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed.
The compression codec and other options are determined from the Hadoop config variables mapred.output.compress*.
</description>
</property>
<property>
<name>hive.exec.compress.intermediate</name>
<value>false</value>
<description>
This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
The compression codec and other options are determined from the Hadoop config variables mapred.output.compress*.
</description>
</property>
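<!--
Illustrative example (Snappy is one common codec choice, not a Hive default): enabling
intermediate compression and picking the codec via the property below:
<property>
  <name>hive.exec.compress.intermediate</name>
  <value>true</value>
</property>
<property>
  <name>hive.intermediate.compression.codec</name>
  <value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
-->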
<property>
<name>hive.intermediate.compression.codec</name>
<value/>
<description/>
</property>
<property>
<name>hive.intermediate.compression.type</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>256000000</value>
<description>Size per reducer. The default is 256 MB; i.e., if the input size is 1 GB, it will use 4 reducers.</description>
</property>
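<!--
Worked example: at 256000000 bytes (about 256 MB) per reducer, a job whose input is
roughly 1 GB is planned with ceil(1000000000 / 256000000) = 4 reducers, subject to the
hive.exec.reducers.max cap below.
-->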
<property>
<name>hive.exec.reducers.max</name>
<value>1009</value>
<description>
Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
</description>
</property>
<property>
<name>hive.exec.pre.hooks</name>
<value/>
<description>
Comma-separated list of pre-execution hooks to be invoked for each statement.
A pre-execution hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
<property>
<name>hive.exec.post.hooks</name>
<value/>
<description>
Comma-separated list of post-execution hooks to be invoked for each statement.
A post-execution hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
<property>
<name>hive.exec.failure.hooks</name>
<value/>
<description>
Comma-separated list of on-failure hooks to be invoked for each statement.
An on-failure hook is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
</description>
</property>
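<!--
Illustrative example (com.example.AuditHook is a hypothetical class name): any class on
the classpath implementing org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext can be
registered with the three hook properties above, e.g. in hive-site.xml:
<property>
  <name>hive.exec.post.hooks</name>
  <value>com.example.AuditHook</value>
</property>
-->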
<property>
<name>hive.exec.query.redactor.hooks</name>
<value/>
<description>
Comma-separated list of hooks to be invoked for each query, which can
transform the query before it is placed in the job.xml file. Each must be a Java class which
extends the org.apache.hadoop.hive.ql.hooks.Redactor abstract class.
</description>
</property>
<property>
<name>hive.client.stats.publishers</name>
<value/>
<description>
Comma-separated list of statistics publishers to be invoked on counters on each job.
A client stats publisher is specified as the name of a Java class which implements the
org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.
</description>
</property>
<property>
<name>hive.exec.parallel</name>
<value>false</value>
<description>Whether to execute jobs in parallel</description>
</property>
<property>
<name>hive.exec.parallel.thread.number</name>
<value>8</value>
<description>How many jobs at most can be executed in parallel</description>
</property>
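<!--
Illustrative session-level example: enabling concurrent execution of independent query
stages, with the thread count shown as an example value:
SET hive.exec.parallel=true;
SET hive.exec.parallel.thread.number=8;
-->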
</configuration>