@@ -18,19 +18,20 @@
 
 package org.apache.spark.sql.paimon.shims
 
-import org.apache.hadoop.fs.Path
 import org.apache.paimon.data.variant.Variant
 import org.apache.paimon.spark.catalyst.analysis.Spark3ResolutionRules
 import org.apache.paimon.spark.catalyst.parser.extensions.PaimonSpark3SqlExtensionsParser
 import org.apache.paimon.spark.data.{Spark3ArrayData, Spark3InternalRow, Spark3InternalRowWithBlob, SparkArrayData, SparkInternalRow}
 import org.apache.paimon.types.{DataType, RowType}
+
+import org.apache.hadoop.fs.Path
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
 import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
 import org.apache.spark.sql.catalyst.parser.ParserInterface
-import org.apache.spark.sql.catalyst.plans.logical.MergeRows.Instruction
 import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.plans.logical.MergeRows.Instruction
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.util.ArrayData
 import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog}
@@ -136,29 +137,29 @@ class Spark3Shim extends SparkShim { |
     throw new UnsupportedOperationException()
 
   def createFileIndex(
-    options: CaseInsensitiveStringMap,
-    sparkSession: SparkSession,
-    paths: Seq[String],
-    userSpecifiedSchema: Option[StructType],
-    partitionSchema: StructType): PartitioningAwareFileIndex = {
+      options: CaseInsensitiveStringMap,
+      sparkSession: SparkSession,
+      paths: Seq[String],
+      userSpecifiedSchema: Option[StructType],
+      partitionSchema: StructType): PartitioningAwareFileIndex = {
 
     class PartitionedMetadataLogFileIndex(
-      sparkSession: SparkSession,
-      path: Path,
-      parameters: Map[String, String],
-      userSpecifiedSchema: Option[StructType],
-      override val partitionSchema: StructType)
+        sparkSession: SparkSession,
+        path: Path,
+        parameters: Map[String, String],
+        userSpecifiedSchema: Option[StructType],
+        override val partitionSchema: StructType)
       extends MetadataLogFileIndex(sparkSession, path, parameters, userSpecifiedSchema)
 
     class PartitionedInMemoryFileIndex(
-      sparkSession: SparkSession,
-      rootPathsSpecified: Seq[Path],
-      parameters: Map[String, String],
-      userSpecifiedSchema: Option[StructType],
-      fileStatusCache: FileStatusCache = NoopCache,
-      userSpecifiedPartitionSpec: Option[PartitionSpec] = None,
-      metadataOpsTimeNs: Option[Long] = None,
-      override val partitionSchema: StructType)
+        sparkSession: SparkSession,
+        rootPathsSpecified: Seq[Path],
+        parameters: Map[String, String],
+        userSpecifiedSchema: Option[StructType],
+        fileStatusCache: FileStatusCache = NoopCache,
+        userSpecifiedPartitionSpec: Option[PartitionSpec] = None,
+        metadataOpsTimeNs: Option[Long] = None,
+        override val partitionSchema: StructType)
       extends InMemoryFileIndex(
         sparkSession,
         rootPathsSpecified,
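For readers unfamiliar with the pattern in the second hunk: both nested classes exist only to pin a caller-supplied partition schema onto a Spark file index, instead of letting the index infer partitioning from the directory layout. Below is a minimal standalone sketch of the same technique, built from the Spark 3 constructor signature visible in the diff; the class name FixedPartitionFileIndex is hypothetical, not part of Paimon.

import org.apache.hadoop.fs.Path

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources.{FileStatusCache, InMemoryFileIndex, NoopCache}
import org.apache.spark.sql.types.StructType

// Sketch: subclass InMemoryFileIndex and override its partitionSchema member,
// so the schema passed at construction time wins over anything Spark would
// otherwise infer from the root paths.
class FixedPartitionFileIndex(
    sparkSession: SparkSession,
    rootPaths: Seq[Path],
    parameters: Map[String, String],
    userSpecifiedSchema: Option[StructType],
    fileStatusCache: FileStatusCache = NoopCache,
    override val partitionSchema: StructType)
  extends InMemoryFileIndex(
    sparkSession,
    rootPaths,
    parameters,
    userSpecifiedSchema,
    fileStatusCache)

Overriding the parent's partitionSchema def with a constructor val is the standard Scala idiom for freezing a computed member at construction time; the hunk above applies it twice, once for the streaming case (MetadataLogFileIndex) and once for the batch case (InMemoryFileIndex). The hunk is truncated before createFileIndex's body ends, so the logic that chooses between the two classes is not shown here.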