public class InsertIntoHiveDirCommand extends org.apache.spark.sql.catalyst.plans.logical.LogicalPlan implements SaveAsHiveFile, scala.Product, scala.Serializable
A command for writing the result of `query` out to the file system.

The SQL syntax that maps to this command is:
INSERT OVERWRITE [LOCAL] DIRECTORY
path
[ROW FORMAT row_format]
[STORED AS file_format]
SELECT ...
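As a quick illustration of the SQL form above, a statement like the following is planned into an `InsertIntoHiveDirCommand` node. This is a minimal sketch: the session setup, the output directory `/tmp/example_out`, and the table name `some_table` are hypothetical.

```scala
import org.apache.spark.sql.SparkSession

// Hive support is required for the STORED AS / ROW FORMAT variants of
// INSERT OVERWRITE DIRECTORY that this command handles.
val spark = SparkSession.builder()
  .appName("insert-overwrite-dir-sketch")
  .enableHiveSupport()
  .getOrCreate()

// Writes the SELECT result as ORC files under the (hypothetical) local
// directory /tmp/example_out, replacing whatever is already there.
spark.sql("""
  INSERT OVERWRITE LOCAL DIRECTORY '/tmp/example_out'
  STORED AS orc
  SELECT id, name FROM some_table
""")
```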
param: isLocal whether the path specified in `storage` is a local directory
param: storage the storage format describing how the query result is stored
param: query the logical plan representing the data to write out
param: overwrite whether to overwrite an existing directory
| Constructor and Description |
|---|
| `InsertIntoHiveDirCommand(boolean isLocal, org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat storage, org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query, boolean overwrite, scala.collection.Seq<String> outputColumnNames)` |
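Because this is an internal `case class` rather than a stable API, application code would normally go through SQL as shown above, but it can also be constructed directly. The sketch below assumes the `spark` session from the previous example; the location URI, serde classes, and column names are illustrative choices for a plain-text Hive layout, not values the class requires.

```scala
import java.net.URI

import org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat
import org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand

// A text-format storage descriptor; every value here is an assumption
// made for this sketch, not something the class mandates.
val storage = CatalogStorageFormat(
  locationUri = Some(new URI("/tmp/example_out")),
  inputFormat = Some("org.apache.hadoop.mapred.TextInputFormat"),
  outputFormat = Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"),
  serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
  compressed = false,
  properties = Map.empty)

// Any DataFrame's logical plan can serve as the query whose rows are written.
val df = spark.range(10).toDF("id")

val command = InsertIntoHiveDirCommand(
  isLocal = true,                    // interpret locationUri as a local path
  storage = storage,
  query = df.queryExecution.logical, // logical plan producing the rows
  overwrite = true,                  // replace existing directory contents
  outputColumnNames = Seq("id"))
```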
| Modifier and Type | Method and Description |
|---|---|
| `abstract static R` | `apply(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5)` |
| `scala.Option<org.apache.hadoop.fs.Path>` | `createdTempDir()` |
| `boolean` | `isLocal()` |
| `scala.collection.immutable.Map<String,org.apache.spark.sql.execution.metric.SQLMetric>` | `metrics()` |
| `scala.collection.Seq<String>` | `outputColumnNames()` |
| `boolean` | `overwrite()` |
| `org.apache.spark.sql.catalyst.plans.logical.LogicalPlan` | `query()` |
| `scala.collection.Seq<Row>` | `run(SparkSession sparkSession, org.apache.spark.sql.execution.SparkPlan child)` |
| `org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat` | `storage()` |
| `static String` | `toString()` |
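To see how these methods fit together, the following continues the construction sketch above and drives the command by hand. The physical `child` plan is obtained through `spark.sessionState.executePlan`, an unstable internal hook rather than a supported entry point, so treat this as an illustration only.

```scala
// Plan the command's query down to a physical SparkPlan, then hand both
// to run(), which performs the write and returns an empty Seq[Row].
val child  = spark.sessionState.executePlan(command.query).executedPlan
val result = command.run(spark, child)
```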
Methods inherited from class org.apache.spark.sql.catalyst.plans.logical.LogicalPlan:
analyzed, assertNotAnalysisRule, childrenResolved, clone, constraints, constructIsNotNullConstraints, inferAdditionalConstraints, initializeForcefully, initializeLogIfNecessary, initializeLogIfNecessary, initializeLogIfNecessary$default$2, invalidateStatsCache, isStreaming, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, maxRows, maxRowsPerPartition, org$apache$spark$internal$Logging$$log__$eq, org$apache$spark$internal$Logging$$log_, org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$_analyzed_$eq, org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$_analyzed, outputOrdering, refresh, resolve, resolve, resolveChildren, resolved, resolveExpressions, resolveOperators, resolveOperatorsDown, resolveOperatorsUp, resolveOperatorsUpWithNewOutput, resolveQuoted, sameOutput, setAnalyzed, statePrefix, stats, statsCache_$eq, statsCache, transformAllExpressions, transformDown, transformUp, validConstraints, verboseStringWithSuffix

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan:
allAttributes, append, append$default$5, append$default$6, canonicalized, CODEGEN_ID_TAG, collectWithSubqueries, conf, doCanonicalize, expressions, findExpressionAndTrackLineageDown, formattedNodeName, innerChildren, inputSet, isCanonicalizedPlan, mapExpressions, missingInput, normalizeExpressions, normalizePredicates, OP_ID_TAG, org$apache$spark$sql$catalyst$plans$QueryPlan$$updateAttr, org$apache$spark$sql$catalyst$plans$QueryPlan$$updateOuterReferencesInSubquery, output, outputSet, printSchema, producedAttributes, references, sameResult, schema, schemaString, semanticHash, simpleString, simpleStringWithNodeId, subqueries, subqueriesAll, transformExpressions, transformExpressionsDown, transformExpressionsUp, transformUpWithNewOutput, transformUpWithNewOutput$default$2, verboseString, verboseStringWithOperatorId

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode:
apply, argString, asCode, children, collect, collectFirst, collectLeaves, containsChild, copyTagsFrom, fastEquals, find, flatMap, foreach, foreachUp, generateTreeString, generateTreeString$default$5, generateTreeString$default$6, getTagValue, hashCode, jsonFields, makeCopy, map, mapChildren, mapProductIterator, nodeName, numberedTreeString, origin, otherCopyArgs, p, prettyJson, productIterator, productPrefix, setTagValue, stringArgs, toJSON, toString, transform, treeString, treeString, treeString, treeString$default$2, treeString$default$3, treeString$default$4, unsetTagValue, withNewChildren

Methods inherited from interface org.apache.spark.sql.hive.execution.SaveAsHiveFile:
deleteExternalTmpPath, executionId, getExternalScratchDir, getExternalTmpPath, getExtTmpPathRelTo, getStagingDir, isSubDir, newVersionExternalTempPath, oldVersionExternalTempPath, saveAsHiveFile, $init$

Methods inherited from interface org.apache.spark.sql.execution.command.DataWritingCommand:
basicWriteJobStatsTracker, children, logicalPlanOutputWithNames, outputColumns, $init$

Methods inherited from interface org.apache.spark.sql.catalyst.plans.logical.Command:
output, stats, $init$

Methods inherited from interface scala.Product:
productArity, productElement, productIterator, productPrefix, $init$

Methods inherited from interface org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper:
allowInvokingTransformsInAnalyzer, markInAnalyzer, $init$

public InsertIntoHiveDirCommand(boolean isLocal,
                                org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat storage,
                                org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query,
                                boolean overwrite,
                                scala.collection.Seq<String> outputColumnNames)
public abstract static R apply(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5)
public static String toString()
public scala.Option<org.apache.hadoop.fs.Path> createdTempDir()
Specified by: createdTempDir in interface SaveAsHiveFile

public scala.collection.immutable.Map<String,org.apache.spark.sql.execution.metric.SQLMetric> metrics()
Specified by: metrics in interface org.apache.spark.sql.execution.command.DataWritingCommand

public boolean isLocal()
public org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat storage()
public org.apache.spark.sql.catalyst.plans.logical.LogicalPlan query()
Specified by: query in interface org.apache.spark.sql.execution.command.DataWritingCommand

public boolean overwrite()
public scala.collection.Seq<String> outputColumnNames()
Specified by: outputColumnNames in interface org.apache.spark.sql.execution.command.DataWritingCommand

public scala.collection.Seq<Row> run(SparkSession sparkSession, org.apache.spark.sql.execution.SparkPlan child)
Specified by: run in interface org.apache.spark.sql.execution.command.DataWritingCommand
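Once `run(...)` has completed, the `SQLMetric`s exposed through `metrics()` have been filled in by the write-side stats tracker. A small closing sketch, assuming the `command` from the earlier examples; the metric keys `numOutputRows` and `numFiles` are the usual basic-write-stats names and are an assumption here, not part of this class's documented contract.

```scala
// Metric keys are assumed stats-tracker names; use Option access so a
// missing key does not throw.
command.metrics.get("numOutputRows").foreach(m => println(s"rows written: ${m.value}"))
command.metrics.get("numFiles").foreach(m => println(s"files written: ${m.value}"))
```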