Constructor and Description |
---|
CliSessionState(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
static void |
ServerUtils.cleanUpScratchDir(HiveConf hiveConf) |
static boolean |
FileUtils.copy(org.apache.hadoop.fs.FileSystem srcFS,
org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.FileSystem dstFS,
org.apache.hadoop.fs.Path dst,
boolean deleteSource,
boolean overwrite,
HiveConf conf)
Copies files between filesystems.
|
static boolean |
FileUtils.isLocalFile(HiveConf conf,
String fileName)
A best-effort attempt to determine if the file is a local file.
|
static boolean |
FileUtils.isLocalFile(HiveConf conf,
URI fileUri)
A best-effort attempt to determine if the file is a local file.
|
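To illustrate the `FileUtils.copy` signature above, here is a minimal sketch of copying a file between filesystems, assuming Hive's `org.apache.hadoop.hive.common.FileUtils`; the paths are hypothetical:

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;

public class FileCopySketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Path src = new Path("hdfs:///tmp/staging/part-00000");   // hypothetical source
    Path dst = new Path("hdfs:///warehouse/t1/part-00000");  // hypothetical destination
    FileSystem srcFs = src.getFileSystem(conf);
    FileSystem dstFs = dst.getFileSystem(conf);
    // deleteSource=false keeps the staging copy; overwrite=true replaces any existing target.
    boolean ok = FileUtils.copy(srcFs, src, dstFs, dst, false, true, conf);
    System.out.println("copied: " + ok);
  }
}
```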
Modifier and Type | Method and Description |
---|---|
static JsonParser |
JsonParserFactory.getParser(HiveConf conf) |
Constructor and Description |
---|
HiveConf(HiveConf other)
Copy constructor
|
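A short sketch of the copy constructor in use: per-query tweaks on the copy leave the base configuration untouched. The `ConfVars` entry toggled below is illustrative only:

```java
import org.apache.hadoop.hive.conf.HiveConf;

public class ConfCopySketch {
  public static void main(String[] args) {
    HiveConf base = new HiveConf();
    // Copy constructor: mutations on the copy do not affect the original.
    HiveConf perQuery = new HiveConf(base);
    perQuery.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, false); // illustrative ConfVars entry
    System.out.println(base.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN));
  }
}
```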
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
HiveMetaStoreClient.conf |
Modifier and Type | Method and Description |
---|---|
HiveConf |
HiveMetaStore.HMSHandler.getHiveConf() |
Modifier and Type | Method and Description |
---|---|
static Map<String,String> |
MetaStoreUtils.getMetaStoreSaslProperties(HiveConf conf)
Read and return the metastore SASL configuration.
|
static IMetaStoreClient |
RetryingMetaStoreClient.getProxy(HiveConf hiveConf) |
static IMetaStoreClient |
RetryingMetaStoreClient.getProxy(HiveConf hiveConf,
Class<?>[] constructorArgTypes,
Object[] constructorArgs,
Map<String,Long> metaCallTimeMap,
String mscClassName)
This method is meant for Hive internal use only.
|
static IMetaStoreClient |
RetryingMetaStoreClient.getProxy(HiveConf hiveConf,
Class<?>[] constructorArgTypes,
Object[] constructorArgs,
String mscClassName)
This method is meant for Hive internal use only.
|
static RawStore |
RawStoreProxy.getProxy(HiveConf hiveConf,
org.apache.hadoop.conf.Configuration conf,
String rawStoreClassName,
int id) |
static IMetaStoreClient |
RetryingMetaStoreClient.getProxy(HiveConf hiveConf,
HiveMetaHookLoader hookLoader,
Map<String,Long> metaCallTimeMap,
String mscClassName) |
static IMetaStoreClient |
RetryingMetaStoreClient.getProxy(HiveConf hiveConf,
HiveMetaHookLoader hookLoader,
String mscClassName) |
static IHMSHandler |
RetryingHMSHandler.getProxy(HiveConf hiveConf,
IHMSHandler baseHandler,
boolean local) |
boolean |
IMetaStoreClient.isCompatibleWith(HiveConf conf)
Returns whether the current client is compatible with the given conf.
|
boolean |
HiveMetaStoreClient.isCompatibleWith(HiveConf conf) |
static void |
MetaStoreUtils.makeDir(org.apache.hadoop.fs.Path path,
HiveConf hiveConf) |
static IHMSHandler |
HiveMetaStore.newRetryingHMSHandler(IHMSHandler baseHandler,
HiveConf hiveConf) |
static IHMSHandler |
HiveMetaStore.newRetryingHMSHandler(IHMSHandler baseHandler,
HiveConf hiveConf,
boolean local) |
static ThriftHiveMetastore.Iface |
HiveMetaStore.newRetryingHMSHandler(String name,
HiveConf conf,
boolean local) |
void |
MetaStoreThread.setHiveConf(HiveConf conf)
Set the Hive configuration for this thread.
|
static void |
MetaStoreUtils.startMetaStore(int port,
HadoopThriftAuthBridge bridge,
HiveConf hiveConf) |
static void |
HiveMetaStore.startMetaStore(int port,
HadoopThriftAuthBridge bridge,
HiveConf conf)
Start the metastore server.
|
static void |
HiveMetaStore.startMetaStore(int port,
HadoopThriftAuthBridge bridge,
HiveConf conf,
Lock startLock,
Condition startCondition,
AtomicBoolean startedServing)
Start the metastore based on the passed
HadoopThriftAuthBridge |
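The single-argument `RetryingMetaStoreClient.getProxy(HiveConf)` overload above is the usual entry point for obtaining a metastore client; a minimal sketch, assuming a metastore is reachable at the illustrative URI:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;

public class MetaStoreClientSketch {
  public static void main(String[] args) throws Exception {
    HiveConf hiveConf = new HiveConf();
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083"); // illustrative URI
    IMetaStoreClient client = RetryingMetaStoreClient.getProxy(hiveConf);
    try {
      System.out.println(client.getAllDatabases()); // calls are retried on transient failures
    } finally {
      client.close();
    }
  }
}
```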
Constructor and Description |
---|
DefaultMetaStoreFilterHookImpl(HiveConf conf) |
HiveMetaStore.HMSHandler(String name,
HiveConf conf) |
HiveMetaStore.HMSHandler(String name,
HiveConf conf,
boolean init) |
HiveMetaStoreClient(HiveConf conf) |
HiveMetaStoreClient(HiveConf conf,
HiveMetaHookLoader hookLoader) |
MetaStoreSchemaInfo(String hiveHome,
HiveConf hiveConf,
String dbType) |
RawStoreProxy(HiveConf hiveConf,
org.apache.hadoop.conf.Configuration conf,
Class<? extends RawStore> rawStoreClass,
int id) |
RetryingMetaStoreClient(HiveConf hiveConf,
Class<?>[] constructorArgTypes,
Object[] constructorArgs,
Map<String,Long> metaCallTimeMap,
Class<? extends IMetaStoreClient> msClientClass) |
RetryingMetaStoreClient(HiveConf hiveConf,
HiveMetaHookLoader hookLoader,
Map<String,Long> metaCallTimeMap,
Class<? extends IMetaStoreClient> msClientClass) |
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
TxnHandler.conf |
Modifier and Type | Method and Description |
---|---|
static void |
TxnDbUtil.setConfValues(HiveConf conf)
Set up the configuration so it will use the DbTxnManager, concurrency will be set to true,
and the JDBC configs will be set for putting the transaction and lock info in the embedded
metastore.
|
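As the description above notes, `TxnDbUtil.setConfValues` wires a `HiveConf` up for the DbTxnManager against the embedded metastore; a minimal test-setup sketch (the `ConfVars` name printed at the end is an assumption):

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;

public class TxnConfSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Enables DbTxnManager, concurrency, and the embedded-metastore JDBC settings.
    TxnDbUtil.setConfValues(conf);
    System.out.println(conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER)); // ConfVars name assumed
  }
}
```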
Constructor and Description |
---|
CompactionTxnHandler(HiveConf conf) |
TxnHandler(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
static Schema |
Driver.getSchema(BaseSemanticAnalyzer sem,
HiveConf conf)
Get a Schema with fields represented with native Hive types
|
Constructor and Description |
---|
Driver(HiveConf conf)
for backwards compatibility with current tests
|
Driver(HiveConf conf,
String userName) |
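A sketch of driving a query through `Driver(HiveConf)`. `SessionState.start` comes from the session package listed later on this page, and `run(String)` is assumed here from Driver's role as a CommandProcessor:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DriverSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SessionState.start(conf);        // Driver requires an active session on this thread
    Driver driver = new Driver(conf);
    driver.run("SHOW TABLES");       // run(String) assumed from the CommandProcessor interface
  }
}
```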
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
Task.conf |
Modifier and Type | Method and Description |
---|---|
static int |
Utilities.estimateNumberOfReducers(HiveConf conf,
org.apache.hadoop.fs.ContentSummary inputSummary,
MapWork work,
boolean finalMapRed)
Estimate the number of reducers needed for this job, based on job input,
and configuration parameters.
|
static <T extends Serializable> |
TaskFactory.get(Class<T> workClass,
HiveConf conf) |
static <T extends Serializable> |
TaskFactory.get(T work,
HiveConf conf,
Task<? extends Serializable>... tasklist) |
static <T extends Serializable> |
TaskFactory.getAndMakeChild(T work,
HiveConf conf,
Task<? extends Serializable>... tasklist) |
static String |
Utilities.getQualifiedPath(HiveConf conf,
org.apache.hadoop.fs.Path path)
Convert path to qualified path.
|
void |
Task.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
void |
StatsNoJobTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
void |
FunctionTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext ctx) |
void |
FetchTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext ctx) |
void |
DDLTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext ctx) |
void |
ConditionalTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
void |
ColumnStatsUpdateTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext ctx) |
void |
ColumnStatsTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext ctx) |
static boolean |
Utilities.isDefaultNameNode(HiveConf conf)
Checks if the current Hive script was executed with a non-default NameNode.
|
static boolean |
Utilities.isPerfOrAboveLogging(HiveConf conf)
Checks if the current HiveServer2 logging operation level is >= PERFORMANCE.
|
static void |
Utilities.reworkMapRedWork(Task<? extends Serializable> task,
boolean reworkMapredWork,
HiveConf conf)
The check here is not entirely clean.
|
Constructor and Description |
---|
ArchiveUtils.HarPathHelper(HiveConf hconf,
URI archive,
URI originalBase)
Creates helper for archive.
|
SecureCmdDoAs(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
static String |
ExecDriver.generateCmdLine(HiveConf hconf,
Context ctx)
Given a Hive Configuration object - generate a command line fragment for passing such
configuration information to ExecDriver.
|
void |
MapredLocalTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
void |
ExecDriver.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext)
Initialization when invoked from QL.
|
static String |
MapRedTask.isEligibleForLocalMode(HiveConf conf,
int numReducers,
long inputLength,
long inputFileCount)
Determine whether a job can run in local mode based on its characteristics.
|
Modifier and Type | Method and Description |
---|---|
static HiveSparkClient |
HiveSparkClientFactory.createHiveSparkClient(HiveConf hiveconf) |
static SparkSession |
SparkUtilities.getSparkSession(HiveConf conf,
SparkSessionManager sparkSessionManager) |
void |
SparkTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
static Map<String,String> |
HiveSparkClientFactory.initiateSparkConf(HiveConf hiveConf) |
static URI |
SparkUtilities.uploadToHDFS(URI source,
HiveConf conf)
Uploads a local file to HDFS
|
Modifier and Type | Method and Description |
---|---|
HiveConf |
SparkSessionImpl.getConf() |
HiveConf |
SparkSession.getConf() |
Modifier and Type | Method and Description |
---|---|
SparkSession |
SparkSessionManagerImpl.getSession(SparkSession existingSession,
HiveConf conf,
boolean doOpen)
If the existingSession can be reused, return it.
|
SparkSession |
SparkSessionManager.getSession(SparkSession existingSession,
HiveConf conf,
boolean doOpen)
Get a valid SparkSession.
|
void |
SparkSessionImpl.open(HiveConf conf) |
void |
SparkSession.open(HiveConf conf)
Initializes a Spark session for DAG execution.
|
void |
SparkSessionManagerImpl.setup(HiveConf hiveConf) |
void |
SparkSessionManager.setup(HiveConf hiveConf)
Initialize based on given configuration.
|
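A sketch of the session-manager lifecycle shown above (`setup`, `getSession`, then a close). `SparkSessionManagerImpl.getInstance()` and `closeSession` are assumptions not shown in this table:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManager;
import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;

public class SparkSessionSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SparkSessionManager mgr = SparkSessionManagerImpl.getInstance(); // getInstance() assumed
    mgr.setup(conf);
    // Passing null means no session to reuse; doOpen=true opens the session immediately.
    SparkSession session = mgr.getSession(null, conf, true);
    try {
      // submit Spark work through the session here
    } finally {
      mgr.closeSession(session); // closeSession assumed from the manager lifecycle
    }
  }
}
```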
Constructor and Description |
---|
LocalSparkJobMonitor(HiveConf hiveConf,
SparkJobStatus sparkJobStatus) |
RemoteSparkJobMonitor(HiveConf hiveConf,
RemoteSparkJobStatus sparkJobStatus) |
Constructor and Description |
---|
LocalSparkJobRef(String jobId,
HiveConf hiveConf,
LocalSparkJobStatus sparkJobStatus,
org.apache.spark.api.java.JavaSparkContext javaSparkContext) |
RemoteSparkJobRef(HiveConf hiveConf,
JobHandle<Serializable> jobHandler,
RemoteSparkJobStatus sparkJobStatus) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
TezSessionState.getConf() |
Modifier and Type | Method and Description |
---|---|
void |
TezSessionPoolManager.closeAndOpen(TezSessionState sessionState,
HiveConf conf,
boolean keepTmpDir) |
void |
TezSessionPoolManager.closeAndOpen(TezSessionState sessionState,
HiveConf conf,
String[] additionalFiles,
boolean keepTmpDir) |
org.apache.hadoop.mapred.JobConf |
DagUtils.createConfiguration(HiveConf hiveConf)
Creates and initializes a JobConf object that can be used to execute
the DAG.
|
TezSessionState |
TezSessionPoolManager.getSession(TezSessionState session,
HiveConf conf,
boolean doOpen) |
TezSessionState |
TezSessionPoolManager.getSession(TezSessionState session,
HiveConf conf,
boolean doOpen,
boolean forceCreate) |
int |
TezJobMonitor.monitorExecution(org.apache.tez.dag.api.client.DAGClient dagClient,
HiveTxnManager txnMgr,
HiveConf conf,
org.apache.tez.dag.api.DAG dag)
monitorExecution handles status printing, failures during execution and final status retrieval.
|
void |
TezSessionState.open(HiveConf conf) |
void |
TezSessionState.open(HiveConf conf,
String[] additionalFiles)
Creates a tez session.
|
void |
TezSessionState.refreshLocalResourcesFromConf(HiveConf conf) |
void |
TezSessionPoolManager.setupPool(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
HookContext.getConf() |
Modifier and Type | Method and Description |
---|---|
static <T extends Hook> |
HookUtils.getHooks(HiveConf conf,
HiveConf.ConfVars hookConfVar,
Class<T> clazz)
Returns the hooks specified in a configuration variable.
|
static String |
HookUtils.redactLogString(HiveConf conf,
String logString) |
void |
HookContext.setConf(HiveConf conf) |
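The `HookUtils.getHooks` entry above loads the hook classes named in a configuration variable; a minimal sketch, assuming `ConfVars.PREEXECHOOKS` and the `ExecuteWithHookContext` hook interface apply to pre-execution hooks:

```java
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookUtils;

public class HookLoadingSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Instantiates every class listed under hive.exec.pre.hooks (assumed mapping).
    List<ExecuteWithHookContext> hooks =
        HookUtils.getHooks(conf, HiveConf.ConfVars.PREEXECHOOKS, ExecuteWithHookContext.class);
    System.out.println("Configured pre-execution hooks: " + hooks.size());
  }
}
```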
Constructor and Description |
---|
HookContext(QueryPlan queryPlan,
HiveConf conf,
Map<String,org.apache.hadoop.fs.ContentSummary> inputPathToContentSummary,
String userName,
String ipAddress,
String operationId) |
Modifier and Type | Method and Description |
---|---|
boolean |
HiveIndexHandler.checkQuerySize(long inputSize,
HiveConf conf)
Check the size of an input query to make sure it fits within the bounds
|
boolean |
AbstractIndexHandler.checkQuerySize(long inputSize,
HiveConf conf) |
protected void |
TableBasedIndexHandler.setStatsDir(HiveConf builderConf) |
Modifier and Type | Method and Description |
---|---|
boolean |
BitmapIndexHandler.checkQuerySize(long querySize,
HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
boolean |
CompactIndexHandler.checkQuerySize(long querySize,
HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
static boolean |
HiveFileFormatUtils.checkInputFormat(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormatCls,
ArrayList<org.apache.hadoop.fs.FileStatus> files)
Checks whether the files are in the same format as the given input format.
|
void |
SymbolicInputFormat.rework(HiveConf job,
MapredWork work) |
void |
ReworkMapredInputFormat.rework(HiveConf job,
MapredWork work) |
boolean |
VectorizedRCFileInputFormat.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files) |
boolean |
SequenceFileInputFormatChecker.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files) |
boolean |
RCFileInputFormat.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files) |
boolean |
InputFormatChecker.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files)
This method is used to validate the input files.
|
Modifier and Type | Method and Description |
---|---|
void |
MergeFileTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
void |
MergeFileWork.resolveConcatenateMerge(HiveConf conf)
alter table ...
|
void |
MergeFileWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
Modifier and Type | Method and Description |
---|---|
boolean |
VectorizedOrcInputFormat.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files) |
boolean |
OrcInputFormat.validateInput(org.apache.hadoop.fs.FileSystem fs,
HiveConf conf,
ArrayList<org.apache.hadoop.fs.FileStatus> files) |
Modifier and Type | Method and Description |
---|---|
void |
PartialScanTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
Modifier and Type | Method and Description |
---|---|
void |
ColumnTruncateTask.initialize(HiveConf conf,
QueryPlan queryPlan,
DriverContext driverContext) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
HiveLockManagerCtx.getConf() |
Modifier and Type | Method and Description |
---|---|
List<HiveLock> |
EmbeddedLockManager.getLocks(boolean verifyTablePartitions,
boolean fetchData,
HiveConf conf) |
List<HiveLock> |
EmbeddedLockManager.getLocks(HiveLockObject key,
boolean verifyTablePartitions,
boolean fetchData,
HiveConf conf) |
HiveTxnManager |
TxnManagerFactory.getTxnManager(HiveConf conf)
Create a new transaction manager.
|
void |
HiveLockManagerCtx.setConf(HiveConf conf) |
Constructor and Description |
---|
HiveLockManagerCtx(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
static org.apache.curator.framework.CuratorFramework |
CuratorFrameworkSingleton.getInstance(HiveConf hiveConf) |
static void |
ZooKeeperHiveLockManager.releaseAllLocks(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
Hive.getConf() |
Modifier and Type | Method and Description |
---|---|
protected static void |
Hive.copyFiles(HiveConf conf,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
org.apache.hadoop.fs.FileSystem fs,
boolean isSrcLocal,
boolean isAcid,
List<org.apache.hadoop.fs.Path> newFiles)
Copy files.
|
static Hive |
Hive.get(HiveConf c)
Gets the Hive object for the current thread.
|
static Hive |
Hive.get(HiveConf c,
boolean needsRefresh)
Get a connection to the metastore.
|
static HiveIndexHandler |
HiveUtils.getIndexHandler(HiveConf conf,
String indexHandlerClass) |
boolean |
Hive.getPartitionsByExpr(Table tbl,
ExprNodeGenericFuncDesc expr,
HiveConf conf,
List<Partition> result)
Get a list of Partitions by expr.
|
static boolean |
Table.hasMetastoreBasedSchema(HiveConf conf,
StorageDescriptor serde) |
static boolean |
Table.hasMetastoreBasedSchema(HiveConf conf,
String serdeLib) |
static boolean |
Hive.moveFile(HiveConf conf,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
org.apache.hadoop.fs.FileSystem fs,
boolean replace,
boolean isSrcLocal) |
protected static void |
Hive.replaceFiles(org.apache.hadoop.fs.Path tablePath,
org.apache.hadoop.fs.Path srcf,
org.apache.hadoop.fs.Path destf,
org.apache.hadoop.fs.Path oldPath,
HiveConf conf,
boolean isSrcLocal)
Replaces files in the partition with the new data set specified by srcf.
|
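A sketch of the thread-local accessor `Hive.get(HiveConf)` from the table above; the database and table names below are illustrative:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class HiveObjectSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);                  // thread-local Hive object, per the doc above
    Table t = db.getTable("default", "src");   // database and table names are illustrative
    System.out.println(t.getDataLocation());
  }
}
```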
Modifier and Type | Method and Description |
---|---|
static MetaDataFormatter |
MetaDataFormatUtils.getFormatter(HiveConf conf) |
void |
MetaDataFormatter.showTableStatus(DataOutputStream out,
Hive db,
HiveConf conf,
List<Table> tbls,
Map<String,String> part,
Partition par)
Show the table status.
|
void |
JsonMetaDataFormatter.showTableStatus(DataOutputStream out,
Hive db,
HiveConf conf,
List<Table> tbls,
Map<String,String> part,
Partition par) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
GroupByOptimizer.GroupByOptimizerContext.getConf() |
HiveConf |
GenMRProcContext.getConf() |
HiveConf |
BucketJoinProcCtx.getConf() |
Modifier and Type | Method and Description |
---|---|
static void |
GenMapRedUtils.addDependentMoveTasks(Task<MoveWork> mvTask,
HiveConf hconf,
Task<? extends Serializable> parentTask,
DependencyCollectionTask dependencyTask)
Adds the dependencyTaskForMultiInsert in ctx as a dependent of parentTask.
|
static void |
GenMapRedUtils.addStatsTask(FileSinkOperator nd,
MoveTask mvTask,
Task<? extends Serializable> currTask,
HiveConf hconf)
Add the StatsTask as a dependent task of the MoveTask
because StatsTask will change the Table/Partition metadata.
|
protected void |
GroupByOptimizer.SortGroupByProcessor.convertGroupByMapSideSortedGroupBy(HiveConf conf,
GroupByOperator groupByOp,
int depth) |
static MapJoinOperator |
MapJoinProcessor.convertJoinOpMapJoinOp(HiveConf hconf,
JoinOperator op,
boolean leftInputJoin,
String[] baseSrc,
List<String> mapAliases,
int mapJoinPos,
boolean noCheckOuterJoin) |
MapJoinOperator |
SparkMapJoinProcessor.convertMapJoin(HiveConf conf,
JoinOperator op,
boolean leftSrc,
String[] baseSrc,
List<String> mapAliases,
int bigTablePos,
boolean noCheckOuterJoin,
boolean validateMapJoinTree)
Convert a regular join to a map-side join.
|
MapJoinOperator |
MapJoinProcessor.convertMapJoin(HiveConf conf,
JoinOperator op,
boolean leftInputJoin,
String[] baseSrc,
List<String> mapAliases,
int mapJoinPos,
boolean noCheckOuterJoin,
boolean validateMapJoinTree)
Convert a regular join to a map-side join.
|
static MapJoinOperator |
MapJoinProcessor.convertSMBJoinToMapJoin(HiveConf hconf,
SMBMapJoinOperator smbJoinOp,
int bigTablePos,
boolean noCheckOuterJoin)
Convert a sort-merge join to a map-side join.
|
static ConditionalTask |
GenMapRedUtils.createCondTask(HiveConf conf,
Task<? extends Serializable> currTask,
MoveWork mvWork,
Serializable mergeWork,
String inputPath)
Construct a conditional task given the current leaf task, the MoveWork and the MapredWork.
|
static org.apache.hadoop.fs.Path |
GenMapRedUtils.createMoveTask(Task<? extends Serializable> currTask,
boolean chDir,
FileSinkOperator fsOp,
ParseContext parseCtx,
List<Task<MoveWork>> mvTasks,
HiveConf hconf,
DependencyCollectionTask dependencyTask)
Create and add any dependent move tasks
|
static void |
GenMapRedUtils.createMRWorkForMergingFiles(FileSinkOperator fsInput,
org.apache.hadoop.fs.Path finalName,
DependencyCollectionTask dependencyTask,
List<Task<MoveWork>> mvTasks,
HiveConf conf,
Task<? extends Serializable> currTask) |
static Task<?> |
IndexUtils.createRootTask(HiveConf builderConf,
Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
StringBuilder command,
LinkedHashMap<String,String> partSpec,
String indexTableName,
String dbName) |
static void |
MapJoinProcessor.genMapJoinOpAndLocalWork(HiveConf conf,
MapredWork newWork,
JoinOperator op,
int mapJoinPos)
Convert the join to a map-join and also generate any local work needed.
|
static MapJoinDesc |
MapJoinProcessor.getMapJoinDesc(HiveConf hconf,
JoinOperator op,
boolean leftInputJoin,
String[] baseSrc,
List<String> mapAliases,
int mapJoinPos,
boolean noCheckOuterJoin) |
static MapredWork |
GenMapRedUtils.getMapRedWorkFromConf(HiveConf conf)
Create a new plan and return it.
|
protected long |
SizeBasedBigTableSelectorForAutoSMJ.getSize(HiveConf conf,
Partition partition) |
protected long |
SizeBasedBigTableSelectorForAutoSMJ.getSize(HiveConf conf,
Table table) |
void |
Optimizer.initialize(HiveConf hiveConf)
Create the list of transformations.
|
static boolean |
GenMapRedUtils.isMergeRequired(List<Task<MoveWork>> mvTasks,
HiveConf hconf,
FileSinkOperator fsOp,
Task<? extends Serializable> currTask,
boolean isInsertTable)
Returns true iff the fsOp requires a merge
|
static void |
GenMapRedUtils.linkMoveTask(FileSinkOperator newOutput,
ConditionalTask cndTsk,
List<Task<MoveWork>> mvTasks,
HiveConf hconf,
DependencyCollectionTask dependencyTask)
Make the move task in the GenMRProcContext following the FileSinkOperator a dependent of all
possible subtrees branching from the ConditionalTask.
|
static void |
GenMapRedUtils.linkMoveTask(Task<MoveWork> mvTask,
Task<? extends Serializable> task,
HiveConf hconf,
DependencyCollectionTask dependencyTask)
Follows the task tree down from task and makes all leaves parents of mvTask
|
void |
GroupByOptimizer.GroupByOptimizerContext.setConf(HiveConf conf) |
void |
GenMRProcContext.setConf(HiveConf conf) |
static void |
GenMapRedUtils.setMapWork(MapWork plan,
ParseContext parseCtx,
Set<ReadEntity> inputs,
PrunedPartitionList partsList,
Operator<? extends OperatorDesc> topOp,
String alias_id,
HiveConf conf,
boolean local)
Initialize the MapWork.
|
Constructor and Description |
---|
BucketJoinProcCtx(HiveConf conf) |
GenMRProcContext(HiveConf conf,
HashMap<Operator<? extends OperatorDesc>,Task<? extends Serializable>> opTaskMap,
ParseContext parseCtx,
List<Task<MoveWork>> mvTask,
List<Task<? extends Serializable>> rootTasks,
LinkedHashMap<Operator<? extends OperatorDesc>,GenMRProcContext.GenMapRedCtx> mapCurrCtx,
Set<ReadEntity> inputs,
Set<WriteEntity> outputs) |
GroupByOptimizer.GroupByOptimizerContext(HiveConf conf) |
SortBucketJoinProcCtx(HiveConf conf) |
TezBucketJoinProcCtx(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
void |
RelOptHiveTable.computePartitionList(HiveConf conf,
org.apache.calcite.rex.RexNode pruneNode) |
Constructor and Description |
---|
HiveDefaultRelMetadataProvider(HiveConf hiveConf) |
RelOptHiveTable(org.apache.calcite.plan.RelOptSchema calciteSchema,
String qualifiedTblName,
org.apache.calcite.rel.type.RelDataType rowType,
Table hiveTblMetadata,
List<ColumnInfo> hiveNonPartitionCols,
List<ColumnInfo> hivePartitionCols,
List<VirtualColumn> hiveVirtualCols,
HiveConf hconf,
Map<String,PrunedPartitionList> partitionCache,
AtomicInteger noColsMissingStats) |
Modifier and Type | Method and Description |
---|---|
static HiveOnTezCostModel |
HiveOnTezCostModel.getCostModel(HiveConf conf) |
Constructor and Description |
---|
HivePartitionPruneRule(HiveConf conf) |
Constructor and Description |
---|
HiveOpConverter(SemanticAnalyzer semanticAnalyzer,
HiveConf hiveConf,
UnparseTranslator unparseTranslator,
Map<String,Operator<? extends OperatorDesc>> topOps,
boolean strictMode) |
Modifier and Type | Method and Description |
---|---|
static Operator<? extends OperatorDesc> |
RewriteParseContextGenerator.generateOperatorTree(HiveConf conf,
String command)
Parse the input String command and generate an operator tree. |
Modifier and Type | Method and Description |
---|---|
HiveConf |
AnnotateOpTraitsProcCtx.getConf() |
Modifier and Type | Method and Description |
---|---|
void |
AnnotateOpTraitsProcCtx.setConf(HiveConf conf) |
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
PhysicalContext.conf |
Modifier and Type | Method and Description |
---|---|
HiveConf |
PhysicalContext.getConf() |
Modifier and Type | Method and Description |
---|---|
static List<Task> |
StageIDsRearranger.getExplainOrder(HiveConf conf,
List<Task<?>> tasks) |
void |
PhysicalContext.setConf(HiveConf conf) |
static boolean |
GenMRSkewJoinProcessor.skewJoinEnabled(HiveConf conf,
JoinOperator joinOp) |
Constructor and Description |
---|
PhysicalContext(HiveConf conf,
ParseContext parseContext,
Context context,
List<Task<? extends Serializable>> rootTasks,
Task<? extends Serializable> fetchTask) |
PhysicalOptimizer(PhysicalContext pctx,
HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
static PrunedPartitionList |
PartitionPruner.prune(Table tab,
ExprNodeDesc prunerExpr,
HiveConf conf,
String alias,
Map<String,PrunedPartitionList> prunedPartitionsMap)
Get the partition list for the table that satisfies the partition pruner
condition.
|
Modifier and Type | Method and Description |
---|---|
HiveConf |
AnnotateStatsProcCtx.getConf() |
Modifier and Type | Method and Description |
---|---|
void |
AnnotateStatsProcCtx.setConf(HiveConf conf) |
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
TaskCompiler.conf |
HiveConf |
OptimizeTezProcContext.conf |
HiveConf |
GenTezProcContext.conf |
protected HiveConf |
BaseSemanticAnalyzer.conf |
Modifier and Type | Method and Description |
---|---|
HiveConf |
ParseContext.getConf() |
Modifier and Type | Method and Description |
---|---|
protected ListBucketingCtx |
BaseSemanticAnalyzer.constructListBucketingCtx(List<String> skewedColNames,
List<List<String>> skewedValues,
Map<List<String>,String> skewedColValueLocationMaps,
boolean isStoredAsSubDirectories,
HiveConf conf)
Construct list bucketing context.
|
protected static Hive |
BaseSemanticAnalyzer.createHiveDB(HiveConf conf) |
static BaseSemanticAnalyzer |
SemanticAnalyzerFactory.get(HiveConf conf,
ASTNode tree) |
static TaskCompiler |
TaskCompilerFactory.getCompiler(HiveConf conf,
ParseContext parseContext)
Returns the appropriate compiler to translate the operator tree
into executable units.
|
static HashMap<String,String> |
DDLSemanticAnalyzer.getValidatedPartSpec(Table table,
ASTNode astNode,
HiveConf conf,
boolean shouldBeFull) |
void |
TezCompiler.init(HiveConf conf,
SessionState.LogHelper console,
Hive db) |
void |
TaskCompiler.init(HiveConf conf,
SessionState.LogHelper console,
Hive db) |
WindowingSpec |
WindowingComponentizer.next(HiveConf hCfg,
SemanticAnalyzer semAly,
UnparseTranslator unparseT,
RowResolver inputRR) |
static String |
EximUtil.relativeToAbsolutePath(HiveConf conf,
String location) |
void |
ParseContext.setConf(HiveConf conf) |
String |
VariableSubstitution.substitute(HiveConf conf,
String expr) |
PTFDesc |
PTFTranslator.translate(PTFInvocationSpec qSpec,
SemanticAnalyzer semAly,
HiveConf hCfg,
RowResolver inputRR,
UnparseTranslator unparseT) |
PTFDesc |
PTFTranslator.translate(WindowingSpec wdwSpec,
SemanticAnalyzer semAly,
HiveConf hCfg,
RowResolver inputRR,
UnparseTranslator unparseT) |
static void |
BaseSemanticAnalyzer.validatePartColumnType(Table tbl,
Map<String,String> partSpec,
ASTNode astNode,
HiveConf conf) |
static void |
BaseSemanticAnalyzer.validatePartSpec(Table tbl,
Map<String,String> partSpec,
ASTNode astNode,
HiveConf conf,
boolean shouldBeFull) |
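To illustrate `EximUtil.relativeToAbsolutePath` from the table above, a minimal sketch that resolves an EXPORT/IMPORT location against the default filesystem; the relative path is illustrative:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.parse.EximUtil;

public class EximPathSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Qualifies a relative export location against the configured default filesystem.
    String absolute = EximUtil.relativeToAbsolutePath(conf, "exports/t1");
    System.out.println(absolute);
  }
}
```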
Constructor and Description |
---|
HiveAuthorizationTaskFactoryImpl(HiveConf conf,
Hive db) |
Modifier and Type | Field and Description |
---|---|
HiveConf |
GenSparkProcContext.conf |
Modifier and Type | Method and Description |
---|---|
HiveConf |
OptimizeSparkProcContext.getConf() |
Constructor and Description |
---|
GenSparkProcContext(HiveConf conf,
ParseContext parseContext,
List<Task<MoveWork>> moveTask,
List<Task<? extends Serializable>> rootTasks,
Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
Map<String,Operator<? extends OperatorDesc>> topOps) |
OptimizeSparkProcContext(HiveConf conf,
ParseContext parseContext,
Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
Deque<Operator<? extends OperatorDesc>> rootOperators) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
TezEdgeProperty.getHiveConf() |
Modifier and Type | Method and Description |
---|---|
List<Task<? extends Serializable>> |
ConditionalResolverSkewJoin.getTasks(HiveConf conf,
Object objCtx) |
List<Task<? extends Serializable>> |
ConditionalResolverMergeFiles.getTasks(HiveConf conf,
Object objCtx) |
List<Task<? extends Serializable>> |
ConditionalResolverCommonJoin.getTasks(HiveConf conf,
Object objCtx) |
List<Task<? extends Serializable>> |
ConditionalResolver.getTasks(HiveConf conf,
Object ctx)
All conditional resolvers implement this interface.
|
void |
MapWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
protected Task<? extends Serializable> |
ConditionalResolverCommonJoin.resolveMapJoinTask(ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx ctx,
HiveConf conf) |
protected void |
ConditionalResolverCommonJoin.resolveUnknownSizes(ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx ctx,
HiveConf conf) |
void |
CreateTableDesc.validate(HiveConf conf) |
Constructor and Description |
---|
TezEdgeProperty(HiveConf hiveConf,
TezEdgeProperty.EdgeType edgeType,
boolean isAutoReduce,
int minReducer,
int maxReducer,
long bytesPerReducer) |
TezEdgeProperty(HiveConf hiveConf,
TezEdgeProperty.EdgeType edgeType,
int buckets) |
Modifier and Type | Method and Description |
---|---|
static void |
CommandProcessorFactory.clean(HiveConf conf) |
static CommandProcessor |
CommandProcessorFactory.get(String[] cmd,
HiveConf conf) |
static CommandProcessor |
CommandProcessorFactory.getForHiveCommand(String[] cmd,
HiveConf conf) |
static CommandProcessor |
CommandProcessorFactory.getForHiveCommandInternal(String[] cmd,
HiveConf conf,
boolean testOnly) |
Constructor and Description |
---|
CryptoProcessor(HadoopShims.HdfsEncryptionShim encryptionShim,
HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
void |
HiveV1Authorizer.applyAuthorizationConfigPolicy(HiveConf hiveConf) |
void |
HiveAuthorizerImpl.applyAuthorizationConfigPolicy(HiveConf hiveConf) |
void |
HiveAuthorizer.applyAuthorizationConfigPolicy(HiveConf hiveConf)
Modify the given HiveConf object to configure authorization related parameters
or other parameters related to hive security
|
void |
HiveAccessController.applyAuthorizationConfigPolicy(HiveConf hiveConf) |
HiveAuthorizer |
HiveAuthorizerFactory.createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider hiveAuthenticator,
HiveAuthzSessionContext ctx)
Create a new instance of HiveAuthorizer, initialized with the given objects.
|
static void |
SettableConfigUpdater.setHiveConfWhiteList(HiveConf hiveConf) |
Constructor and Description |
---|
AuthorizationMetaStoreFilterHook(HiveConf conf) |
HiveV1Authorizer(HiveConf conf,
Hive hive) |
Modifier and Type | Method and Description |
---|---|
void |
SQLStdHiveAccessControllerWrapper.applyAuthorizationConfigPolicy(HiveConf hiveConf) |
void |
SQLStdHiveAccessController.applyAuthorizationConfigPolicy(HiveConf hiveConf) |
HiveAuthorizer |
SQLStdHiveAuthorizerFactory.createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider authenticator,
HiveAuthzSessionContext ctx) |
HiveAuthorizer |
SQLStdConfOnlyAuthorizerFactory.createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider authenticator,
HiveAuthzSessionContext ctx) |
static RequiredPrivileges |
SQLAuthorizationUtils.getPrivilegesFromFS(org.apache.hadoop.fs.Path filePath,
HiveConf conf,
String userName)
Map permissions for this URI to SQL Standard privileges.
|
Constructor and Description |
---|
SQLStdHiveAccessController(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider authenticator,
HiveAuthzSessionContext ctx) |
SQLStdHiveAccessControllerWrapper(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider authenticator,
HiveAuthzSessionContext ctx) |
SQLStdHiveAuthorizationValidator(HiveMetastoreClientFactory metastoreClientFactory,
HiveConf conf,
HiveAuthenticationProvider authenticator,
SQLStdHiveAccessControllerWrapper privilegeManager,
HiveAuthzSessionContext ctx) |
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
SessionState.conf
current configuration.
|
Modifier and Type | Method and Description |
---|---|
HiveConf |
SessionState.getConf() |
static HiveConf |
SessionState.getSessionConf() |
Modifier and Type | Method and Description |
---|---|
static CreateTableAutomaticGrant |
CreateTableAutomaticGrant.create(HiveConf conf) |
HiveTxnManager |
SessionState.initTxnMgr(HiveConf conf)
Initialize the transaction manager.
|
void |
SessionState.setConf(HiveConf conf) |
static SessionState |
SessionState.start(HiveConf conf)
Start a new session and set it as the current session.
|
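A sketch combining `SessionState.start(HiveConf)` with the static `getSessionConf()` accessor from the table above; both are expected to expose the same configuration on the calling thread:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    SessionState ss = SessionState.start(conf);  // becomes the current session for this thread
    // Static access to the current session's conf, per getSessionConf() above:
    HiveConf current = SessionState.getSessionConf();
    System.out.println(current == ss.getConf());
  }
}
```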
Constructor and Description |
---|
OperationLog(String name,
File file,
HiveConf hiveConf) |
SessionState(HiveConf conf) |
SessionState(HiveConf conf,
String userName) |
Modifier and Type | Method and Description |
---|---|
static Statistics |
StatsUtils.collectStatistics(HiveConf conf,
PrunedPartitionList partList,
Table table,
List<ColumnInfo> schema,
List<String> neededColumns,
List<String> referencedColumns,
boolean fetchColStats,
boolean fetchPartStats) |
static Statistics |
StatsUtils.collectStatistics(HiveConf conf,
PrunedPartitionList partList,
Table table,
TableScanOperator tableScanOperator)
Collect table-, partition-, and column-level statistics.
|
static int |
StatsUtils.estimateRowSizeFromSchema(HiveConf conf,
List<ColumnInfo> schema,
List<String> neededColumns) |
static long |
StatsUtils.getAvgColLenOfVariableLengthTypes(HiveConf conf,
ObjectInspector oi,
String colType)
Get the raw data size of variable-length data types.
|
static ColStatistics |
StatsUtils.getColStatisticsFromExpression(HiveConf conf,
Statistics parentStats,
ExprNodeDesc end)
Get column statistics for an expression node.
|
static List<ColStatistics> |
StatsUtils.getColStatisticsFromExprMap(HiveConf conf,
Statistics parentStats,
Map<String,ExprNodeDesc> colExprMap,
RowSchema rowSchema)
Get column statistics from parent statistics.
|
static List<Long> |
StatsUtils.getFileSizeForPartitions(HiveConf conf,
List<Partition> parts)
Find the bytes on disk occupied by a list of partitions.
|
static long |
StatsUtils.getFileSizeForTable(HiveConf conf,
Table table)
Find the bytes on disk occupied by a table
|
static long |
StatsUtils.getSizeOfComplexTypes(HiveConf conf,
ObjectInspector oi)
Get the size of complex data types
|
Modifier and Type | Method and Description |
---|---|
void |
TableFunctionResolver.initialize(HiveConf cfg,
PTFDesc ptfDesc,
PartitionedTableFunctionDef tDef) |
Modifier and Type | Method and Description |
---|---|
static String |
ZooKeeperHiveHelper.getQuorumServers(HiveConf conf)
Get the ensemble server addresses from the configuration.
|
Modifier and Type | Method and Description |
---|---|
HiveConf |
HiveSchemaTool.getHiveConf() |
Modifier and Type | Method and Description |
---|---|
static Connection |
HiveSchemaHelper.getConnectionToMetastore(String userName,
String password,
boolean printInfo,
HiveConf hiveConf)
Get a JDBC connection to the metastore database.
|
static HiveSchemaHelper.NestedScriptParser |
HiveSchemaHelper.getDbCommandParser(String dbName,
String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
static String |
HiveSchemaHelper.getValidConfVar(HiveConf.ConfVars confVar,
HiveConf hiveConf) |
Constructor and Description |
---|
HiveSchemaHelper.DerbyCommandParser(String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
HiveSchemaHelper.MSSQLCommandParser(String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
HiveSchemaHelper.MySqlCommandParser(String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
HiveSchemaHelper.OracleCommandParser(String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
HiveSchemaHelper.PostgresCommandParser(String dbOpts,
String msUsername,
String msPassword,
HiveConf hiveConf) |
HiveSchemaTool(String hiveHome,
HiveConf hiveConf,
String dbType) |
Constructor and Description |
---|
QFileClient(HiveConf hiveConf,
String hiveRootDirectory,
String qFileDirectory,
String outputDirectory,
String expectedDirectory) |
Modifier and Type | Method and Description |
---|---|
static HiveCompat.CompatLevel |
HiveCompat.getCompatLevel(HiveConf hconf)
Return the configured compatibility level.
|
Modifier and Type | Method and Description |
---|---|
static HiveConf |
HCatUtil.getHiveConf(org.apache.hadoop.conf.Configuration conf) |
Modifier and Type | Method and Description |
---|---|
static HiveMetaStoreClient |
HCatUtil.getHiveClient(HiveConf hiveConf)
Deprecated.
|
static IMetaStoreClient |
HCatUtil.getHiveMetastoreClient(HiveConf hiveConf)
Get or create a Hive metastore client, depending on whether it exists in the cache.
|
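A sketch of the two `HCatUtil` helpers above, preferring the cached `getHiveMetastoreClient` over the deprecated `getHiveClient`; `closeHiveClientQuietly` is an assumption not shown in this table:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hive.hcatalog.common.HCatUtil;

public class HCatClientSketch {
  public static void main(String[] args) throws Exception {
    HiveConf hiveConf = HCatUtil.getHiveConf(new Configuration());
    IMetaStoreClient msc = HCatUtil.getHiveMetastoreClient(hiveConf);
    try {
      System.out.println(msc.getAllDatabases());
    } finally {
      HCatUtil.closeHiveClientQuietly(msc); // helper assumed for releasing the cached client
    }
  }
}
```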
Modifier and Type | Field and Description |
---|---|
protected static HiveConf |
MessageFactory.hiveConf |
Modifier and Type | Method and Description |
---|---|
protected LazySimpleSerDe |
DelimitedInputWriter.createSerde(Table tbl,
HiveConf conf)
Creates a LazySimpleSerDe.
|
StreamingConnection |
HiveEndPoint.newConnection(boolean createPartIfNotExists,
HiveConf conf)
Acquire a new connection to the MetaStore for streaming.
|
StreamingConnection |
HiveEndPoint.newConnection(boolean createPartIfNotExists,
HiveConf conf,
org.apache.hadoop.security.UserGroupInformation authenticatedUser)
Acquire a new connection to MetaStore for streaming.
|
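A sketch of opening a streaming connection via `HiveEndPoint.newConnection(boolean, HiveConf)` from the table above; the endpoint constructor arguments (metastore URI, database, table, partition values) are illustrative assumptions:

```java
import java.util.Arrays;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.StreamingConnection;

public class StreamingSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Endpoint coordinates are illustrative; the partition must match the table layout.
    HiveEndPoint endPoint = new HiveEndPoint(
        "thrift://localhost:9083", "default", "web_logs",
        Arrays.asList("2017-01-01"));
    StreamingConnection conn = endPoint.newConnection(true, conf); // create partition if absent
    try {
      // fetch transaction batches and write records here
    } finally {
      conn.close();
    }
  }
}
```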
Constructor and Description |
---|
DelimitedInputWriter(String[] colNamesForFields,
String delimiter,
HiveEndPoint endPoint,
HiveConf conf)
Constructor.
|
DelimitedInputWriter(String[] colNamesForFields,
String delimiter,
HiveEndPoint endPoint,
HiveConf conf,
char serdeSeparator)
Constructor.
|
StrictJsonWriter(HiveEndPoint endPoint,
HiveConf conf) |
StrictRegexWriter(HiveEndPoint endPoint,
HiveConf conf) |
StrictRegexWriter(String regex,
HiveEndPoint endPoint,
HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
Service.getHiveConf()
Get the configuration of this service.
|
HiveConf |
FilterService.getHiveConf() |
HiveConf |
AbstractService.getHiveConf() |
Modifier and Type | Method and Description |
---|---|
static void |
ServiceOperations.deploy(Service service,
HiveConf configuration)
Initialize then start a service.
|
void |
Service.init(HiveConf conf)
Initialize the service.
|
void |
FilterService.init(HiveConf config) |
void |
CompositeService.init(HiveConf hiveConf) |
void |
BreakableService.init(HiveConf conf) |
void |
AbstractService.init(HiveConf hiveConf)
Initialize the service.
|
static void |
ServiceOperations.init(Service service,
HiveConf configuration)
Initialize a service.
|
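A sketch of the service lifecycle entries above, using `ServiceOperations.deploy(Service, HiveConf)` to run `init` and `start` together; the anonymous `AbstractService` subclass stands in for a real service implementation and is an assumption:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.AbstractService;
import org.apache.hive.service.Service;
import org.apache.hive.service.ServiceOperations;

public class ServiceLifecycleSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Anonymous AbstractService used only to demonstrate the lifecycle calls.
    Service service = new AbstractService("example-service") { };
    ServiceOperations.deploy(service, conf); // init(conf) followed by start()
    System.out.println(service.getHiveConf() == conf);
    service.stop();
  }
}
```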
Modifier and Type | Method and Description |
---|---|
static void |
HiveAuthFactory.loginFromKeytab(HiveConf hiveConf) |
static org.apache.hadoop.security.UserGroupInformation |
HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(HiveConf hiveConf) |
static void |
HiveAuthFactory.verifyProxyAccess(String realUser,
String proxyUser,
String ipAddress,
HiveConf hiveConf) |
Constructor and Description |
---|
HiveAuthFactory(HiveConf conf) |
Modifier and Type | Method and Description |
---|---|
void |
CLIService.init(HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
Operation.getConfiguration() |
Modifier and Type | Method and Description |
---|---|
void |
OperationManager.init(HiveConf hiveConf) |
void |
SQLOperation.prepare(HiveConf sqlOperationConf)
Compile the query and extract metadata
|
void |
Operation.setConfiguration(HiveConf configuration) |
Modifier and Type | Method and Description |
---|---|
HiveConf |
HiveSessionImpl.getHiveConf() |
HiveConf |
HiveSessionBase.getHiveConf() |
HiveConf |
HiveSessionHookContextImpl.getSessionConf() |
HiveConf |
HiveSessionHookContext.getSessionConf()
Retrieve the session conf.
|
Modifier and Type | Method and Description |
---|---|
void |
SessionManager.init(HiveConf hiveConf) |
Constructor and Description |
---|
HiveSessionImpl(TProtocolVersion protocol,
String username,
String password,
HiveConf serverhiveConf,
String ipAddress) |
HiveSessionImplwithUGI(TProtocolVersion protocol,
String username,
String password,
HiveConf hiveConf,
String ipAddress,
String delegationToken) |
Modifier and Type | Field and Description |
---|---|
protected HiveConf |
ThriftCLIService.hiveConf |
Modifier and Type | Method and Description |
---|---|
void |
ThriftCLIService.init(HiveConf hiveConf) |
void |
EmbeddedThriftBinaryCLIService.init(HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
void |
HiveServer2.init(HiveConf hiveConf) |
static boolean |
HiveServer2.isHTTPTransportMode(HiveConf hiveConf) |
Modifier and Type | Method and Description |
---|---|
static SparkClient |
SparkClientFactory.createClient(Map<String,String> sparkConf,
HiveConf hiveConf)
Instantiates a new Spark client.
|
Modifier and Type | Method and Description |
---|---|
static String |
RpcConfiguration.getValue(HiveConf conf,
String key)
Utility method that, for a given RpcConfiguration key, converts the value to milliseconds if it is a time value, and returns it as a string in either case.
|