Commit 2d1d82b

initial commit - phase 3a

1 parent 590b0d5

11 files changed: +900 -57 lines

sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetVectorUpdaterFactory.java

Lines changed: 11 additions & 0 deletions

@@ -71,6 +71,17 @@ public class ParquetVectorUpdaterFactory {
   }

   public ParquetVectorUpdater getUpdater(ColumnDescriptor descriptor, DataType sparkType) {
+    // Types Framework: framework FIRST, original switch as fallback.
+    ParquetVectorUpdater frameworkUpdater =
+      org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
+        .getVectorUpdaterOrNull(sparkType, descriptor, logicalTypeAnnotation);
+    if (frameworkUpdater != null) return frameworkUpdater;
+
+    return getUpdaterDefault(descriptor, sparkType);
+  }
+
+  private ParquetVectorUpdater getUpdaterDefault(
+      ColumnDescriptor descriptor, DataType sparkType) {
     PrimitiveType type = descriptor.getPrimitiveType();
     PrimitiveType.PrimitiveTypeName typeName = type.getPrimitiveTypeName();
     boolean isUnknownType = type.getLogicalTypeAnnotation() instanceof UnknownLogicalTypeAnnotation;
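This hunk sets the template the rest of the commit repeats: consult the new types framework first, and only on a miss fall back to the pre-existing logic, renamed with a Default suffix. Here the miss signal is a null return from getVectorUpdaterOrNull. Below is a minimal Scala sketch of that null-as-miss contract; Updater, FrameworkRegistry, and the type keys are illustrative stand-ins, not the real ParquetTypeOps API.

// Sketch of the null-as-miss dispatch used by getUpdater (names hypothetical).
object UpdaterDispatchSketch {
  trait Updater { def name: String }

  object FrameworkRegistry {
    // Mirrors getVectorUpdaterOrNull: null means "framework does not handle this type".
    def updaterOrNull(sparkType: String): Updater =
      if (sparkType == "frameworkType") new Updater { val name = "framework" }
      else null
  }

  def getUpdater(sparkType: String): Updater = {
    val fromFramework = FrameworkRegistry.updaterOrNull(sparkType)
    if (fromFramework != null) fromFramework
    else getUpdaterDefault(sparkType) // original switch, untouched
  }

  private def getUpdaterDefault(sparkType: String): Updater =
    new Updater { val name = "legacy" }
}

The same two-step shape recurs in every file below; only the miss signal changes (null here, a boxed Boolean next, Option in the Scala files).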

sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java

Lines changed: 13 additions & 0 deletions

@@ -150,6 +150,19 @@ public VectorizedColumnReader(
   private boolean isLazyDecodingSupported(
       PrimitiveType.PrimitiveTypeName typeName,
       DataType sparkType) {
+    // Types Framework: framework FIRST, original switch as fallback.
+    // Returns boxed Boolean: null = not handled, non-null = framework result.
+    Boolean frameworkResult =
+      org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
+        .isLazyDecodingSupportedFor(sparkType, typeName, logicalTypeAnnotation);
+    if (frameworkResult != null) return frameworkResult;
+
+    return isLazyDecodingSupportedDefault(typeName, sparkType);
+  }
+
+  private boolean isLazyDecodingSupportedDefault(
+      PrimitiveType.PrimitiveTypeName typeName,
+      DataType sparkType) {
     boolean isSupported = false;
     // Don't use lazy dictionary decoding if the column needs extra processing: upcasting or date
    // rebasing.
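Because isLazyDecodingSupported answers with a primitive boolean, the framework hook needs a third state for "not handled". The diff encodes it as a boxed java.lang.Boolean, with null meaning "fall through to the default". Here is a hedged Scala sketch of that tri-state boundary; only the name isLazyDecodingSupportedFor comes from the diff, and the map-backed verdicts are invented for illustration.

// Tri-state dispatch: null = not handled, TRUE/FALSE = framework verdict.
object LazyDecodingSketch {
  // Hypothetical framework-side table of verdicts for the types it owns.
  private val verdicts: Map[String, Boolean] = Map("ownedType" -> true)

  // Boxed at the boundary: Option[Boolean] maps cleanly onto java.lang.Boolean-or-null.
  def isLazyDecodingSupportedFor(typeName: String): java.lang.Boolean =
    verdicts.get(typeName).map(java.lang.Boolean.valueOf(_)).orNull

  def isLazyDecodingSupported(typeName: String): Boolean = {
    val framework = isLazyDecodingSupportedFor(typeName)
    if (framework != null) framework // auto-unboxes at the primitive call site
    else isLazyDecodingSupportedDefault(typeName)
  }

  private def isLazyDecodingSupportedDefault(typeName: String): Boolean = false
}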

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala

Lines changed: 7 additions & 1 deletion

@@ -49,6 +49,7 @@ import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributes
 import org.apache.spark.sql.catalyst.util.{DateTimeUtils, RebaseDateTime}
 import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.execution.datasources._
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
 import org.apache.spark.sql.execution.vectorized.{ConstantColumnVector, OffHeapColumnVector, OnHeapColumnVector}
 import org.apache.spark.sql.internal.{SessionStateHelper, SQLConf}
 import org.apache.spark.sql.internal.SQLConf._
@@ -411,7 +412,12 @@ class ParquetFileFormat
     }
   }

-  override def supportDataType(dataType: DataType): Boolean = dataType match {
+  override def supportDataType(dataType: DataType): Boolean =
+    // Types Framework: framework FIRST, original match as fallback.
+    ParquetTypeOps(dataType).map(_.supportDataType)
+      .getOrElse(supportDataTypeDefault(dataType))
+
+  private def supportDataTypeDefault(dataType: DataType): Boolean = dataType match {
     // GeoSpatial data types in Parquet are limited only to types with supported SRIDs.
     case g: GeometryType => GeometryType.isSridSupported(g.srid)
     case g: GeographyType => GeographyType.isSridSupported(g.srid)
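In the Scala files the dispatch collapses into Option combinators: ParquetTypeOps(dataType) evidently returns an Option, map applies the framework's answer, and getOrElse falls back to the renamed original match. A small sketch of that registry shape; TypeOpsRegistry, the string keys, and the supportDataType payload are hypothetical.

// Sketch of an Option-returning type-ops registry (names hypothetical).
object SupportDataTypeSketch {
  final case class TypeOps(supportDataType: Boolean)

  object TypeOpsRegistry {
    private val owned = Map("frameworkType" -> TypeOps(supportDataType = true))
    // None means "the framework does not own this type".
    def apply(typeName: String): Option[TypeOps] = owned.get(typeName)
  }

  def supportDataType(typeName: String): Boolean =
    TypeOpsRegistry(typeName).map(_.supportDataType)
      .getOrElse(supportDataTypeDefault(typeName))

  // Stand-in for the original match, now demoted to fallback.
  private def supportDataTypeDefault(typeName: String): Boolean =
    typeName != "unsupportedType"
}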

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilters.scala

Lines changed: 57 additions & 8 deletions

@@ -39,6 +39,7 @@ import org.apache.parquet.schema.Type.Repetition

 import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils, IntervalUtils}
 import org.apache.spark.sql.catalyst.util.RebaseDateTime.{rebaseGregorianToJulianDays, rebaseGregorianToJulianMicros, RebaseSpec}
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.{ParquetFilterOp, ParquetTypeOps}
 import org.apache.spark.sql.internal.LegacyBehaviorPolicy
 import org.apache.spark.sql.sources
 import org.apache.spark.unsafe.types.UTF8String
@@ -218,7 +219,31 @@ class ParquetFilters(
     case l => l.asInstanceOf[JLong]
   }

-  private val makeEq:
+  // ==================== Types Framework: Filter Dispatch ====================
+  // Custom extractor defined inside ParquetFilters because ParquetSchemaType is a
+  // private inner class - code that destructures it must live in this class scope.
+  // Moving it outside breaks compilation. See Risk #6 in the Phase 3a plan.
+  private object FrameworkFilterOps {
+    def unapply(pst: ParquetSchemaType): Option[ParquetTypeOps] = {
+      val ParquetSchemaType(ann, prim, _) = pst
+      ParquetTypeOps.findByParquetFilter(ann, prim)
+    }
+  }
+
+  // Single helper that creates a framework-first PartialFunction for any comparison op.
+  private def frameworkComparisonFilter(op: ParquetFilterOp)
+      : PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
+    case FrameworkFilterOps(ops) =>
+      (n: Array[String], v: Any) => ops.makeFilterPredicate(op, n, v)
+  }
+
+  private def frameworkInFilter
+      : PartialFunction[ParquetSchemaType, (Array[String], Array[Any]) => FilterPredicate] = {
+    case FrameworkFilterOps(ops) =>
+      (n: Array[String], values: Array[Any]) => ops.makeInFilterPredicate(n, values)
+  }
+
+  private val makeEqDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetBooleanType =>
       (n: Array[String], v: Any) => FilterApi.eq(booleanColumn(n), v.asInstanceOf[JBoolean])
@@ -272,7 +297,7 @@
       Option(v).map(d => decimalToByteArray(d.asInstanceOf[JBigDecimal], length)).orNull)
   }

-  private val makeNotEq:
+  private val makeNotEqDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetBooleanType =>
       (n: Array[String], v: Any) => FilterApi.notEq(booleanColumn(n), v.asInstanceOf[JBoolean])
@@ -325,7 +350,7 @@
       Option(v).map(d => decimalToByteArray(d.asInstanceOf[JBigDecimal], length)).orNull)
   }

-  private val makeLt:
+  private val makeLtDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetByteType | ParquetShortType | ParquetIntegerType =>
       (n: Array[String], v: Any) => FilterApi.lt(intColumn(n), toIntValue(v))
@@ -364,7 +389,7 @@
       FilterApi.lt(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
   }

-  private val makeLtEq:
+  private val makeLtEqDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetByteType | ParquetShortType | ParquetIntegerType =>
       (n: Array[String], v: Any) => FilterApi.ltEq(intColumn(n), toIntValue(v))
@@ -403,7 +428,7 @@
       FilterApi.ltEq(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
   }

-  private val makeGt:
+  private val makeGtDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetByteType | ParquetShortType | ParquetIntegerType =>
       (n: Array[String], v: Any) => FilterApi.gt(intColumn(n), toIntValue(v))
@@ -442,7 +467,7 @@
       FilterApi.gt(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
   }

-  private val makeGtEq:
+  private val makeGtEqDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Any) => FilterPredicate] = {
     case ParquetByteType | ParquetShortType | ParquetIntegerType =>
       (n: Array[String], v: Any) => FilterApi.gtEq(intColumn(n), toIntValue(v))
@@ -481,7 +506,7 @@
       FilterApi.gtEq(binaryColumn(n), decimalToByteArray(v.asInstanceOf[JBigDecimal], length))
   }

-  private val makeInPredicate:
+  private val makeInPredicateDefault:
     PartialFunction[ParquetSchemaType, (Array[String], Array[Any]) => FilterPredicate] = {

     case ParquetByteType | ParquetShortType | ParquetIntegerType =>
@@ -592,6 +617,18 @@
       FilterApi.in(binaryColumn(n), set)
   }

+  // Types Framework: composed filter vals - framework FIRST, existing as fallback.
+  private val makeEq = frameworkComparisonFilter(ParquetFilterOp.Eq).orElse(makeEqDefault)
+  private val makeNotEq =
+    frameworkComparisonFilter(ParquetFilterOp.NotEq).orElse(makeNotEqDefault)
+  private val makeLt = frameworkComparisonFilter(ParquetFilterOp.Lt).orElse(makeLtDefault)
+  private val makeLtEq =
+    frameworkComparisonFilter(ParquetFilterOp.LtEq).orElse(makeLtEqDefault)
+  private val makeGt = frameworkComparisonFilter(ParquetFilterOp.Gt).orElse(makeGtDefault)
+  private val makeGtEq =
+    frameworkComparisonFilter(ParquetFilterOp.GtEq).orElse(makeGtEqDefault)
+  private val makeInPredicate = frameworkInFilter.orElse(makeInPredicateDefault)
+
   // Returns filters that can be pushed down when reading Parquet files.
   def convertibleFilters(filters: Seq[sources.Filter]): Seq[sources.Filter] = {
     filters.flatMap(convertibleFiltersHelper(_, canPartialPushDown = true))
@@ -642,7 +679,19 @@
   // Parquet's type in the given file should be matched to the value's type
   // in the pushed filter in order to push down the filter to Parquet.
   private def valueCanMakeFilterOn(name: String, value: Any): Boolean = {
-    value == null || (nameToParquetField(name).fieldType match {
+    if (value == null) return true
+    // Types Framework: framework FIRST, original match as fallback.
+    // Extract fieldType once and pass to *Default to avoid double lookup.
+    val fieldType = nameToParquetField(name).fieldType
+    val ParquetSchemaType(ann, prim, _) = fieldType
+    ParquetTypeOps.findByParquetFilter(ann, prim)
+      .map(_.isFilterableValue(value))
+      .getOrElse(valueCanMakeFilterOnDefault(fieldType, value))
+  }
+
+  private def valueCanMakeFilterOnDefault(
+      fieldType: ParquetSchemaType, value: Any): Boolean = {
+    value == null || (fieldType match {
       case ParquetBooleanType => value.isInstanceOf[JBoolean]
       case ParquetIntegerType if value.isInstanceOf[Period] => true
       case ParquetByteType | ParquetShortType | ParquetIntegerType => value match {
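The filter dispatch is the most intricate piece of the commit: a private FrameworkFilterOps extractor turns "the framework owns this ParquetSchemaType" into a pattern, and each public make* val is rebuilt as frameworkComparisonFilter(op).orElse(make*Default). Since PartialFunction#orElse tries the receiver first, framework types are intercepted before any legacy case can match, while unclaimed types take exactly the old path. A condensed, runnable Scala sketch of the mechanism; SchemaType, Ops, and the string payloads are simplified stand-ins for the private classes in the diff.

// Condensed sketch of the extractor + PartialFunction composition.
object FilterDispatchSketch {
  sealed trait FilterOp
  case object Eq extends FilterOp

  final case class SchemaType(annotation: String)
  final case class Ops(makePredicate: (FilterOp, String, Any) => String)

  // unapply succeeds only for schema types the framework owns.
  object FrameworkFilterOps {
    def unapply(st: SchemaType): Option[Ops] =
      if (st.annotation == "framework") Some(Ops((op, n, v) => s"fw:$op($n=$v)"))
      else None
  }

  def frameworkComparisonFilter(op: FilterOp)
      : PartialFunction[SchemaType, (String, Any) => String] = {
    case FrameworkFilterOps(ops) => (n, v) => ops.makePredicate(op, n, v)
  }

  val makeEqDefault: PartialFunction[SchemaType, (String, Any) => String] = {
    case SchemaType("boolean") => (n, v) => s"legacy:eq($n=$v)"
  }

  // orElse consults the framework case first; legacy cases never see owned types.
  val makeEq = frameworkComparisonFilter(Eq).orElse(makeEqDefault)

  // makeEq(SchemaType("framework"))("col", 1)   // "fw:Eq(col=1)"
  // makeEq(SchemaType("boolean"))("col", true)  // "legacy:eq(col=true)"
}

One design consequence worth noting: because the composed vals reference the *Default vals and Scala initializes vals in declaration order, the composition block has to come after the last default definition, which is likely why the diff appends it below makeInPredicateDefault.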

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadSupport.scala

Lines changed: 42 additions & 26 deletions

@@ -36,6 +36,7 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.util.RebaseDateTime.RebaseSpec
 import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.execution.datasources.VariantMetadata
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
 import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 import org.apache.spark.sql.types._

@@ -215,32 +216,15 @@ object ParquetReadSupport extends Logging {
       caseSensitive: Boolean,
       useFieldId: Boolean,
       returnNullStructIfAllFieldsMissing: Boolean): Type = {
-    val newParquetType = catalystType match {
-      case t: ArrayType if ParquetSchemaConverter.isComplexType(t.elementType) =>
-        // Only clips array types with nested type as element type.
-        clipParquetListType(parquetType.asGroupType(), t.elementType, caseSensitive, useFieldId,
-          returnNullStructIfAllFieldsMissing)
-
-      case t: MapType
-          if ParquetSchemaConverter.isComplexType(t.keyType) ||
-            ParquetSchemaConverter.isComplexType(t.valueType) =>
-        // Only clips map types with nested key type or value type
-        clipParquetMapType(
-          parquetType.asGroupType(), t.keyType, t.valueType, caseSensitive, useFieldId,
-          returnNullStructIfAllFieldsMissing)
-
-      case t: StructType if VariantMetadata.isVariantStruct(t) =>
-        clipVariantSchema(parquetType.asGroupType(), t, returnNullStructIfAllFieldsMissing)
-
-      case t: StructType =>
-        clipParquetGroup(parquetType.asGroupType(), t, caseSensitive, useFieldId,
-          returnNullStructIfAllFieldsMissing)
-
-      case _ =>
-        // UDTs and primitive types are not clipped. For UDTs, a clipped version might not be able
-        // to be mapped to desired user-space types. So UDTs shouldn't participate schema merging.
-        parquetType
-    }
+    // Types Framework: framework FIRST for struct-backed types that declare
+    // parquetStructSchema. Primitive framework types (parquetStructSchema = None)
+    // fall through to *Default which returns parquetType unchanged.
+    val newParquetType = ParquetTypeOps(catalystType)
+      .flatMap(_.parquetStructSchema)
+      .map(st => clipParquetGroup(parquetType.asGroupType(), st, caseSensitive, useFieldId,
+        returnNullStructIfAllFieldsMissing))
+      .getOrElse(clipParquetTypeDefault(parquetType, catalystType, caseSensitive, useFieldId,
+        returnNullStructIfAllFieldsMissing))

     if (useFieldId && parquetType.getId != null) {
       newParquetType.withId(parquetType.getId.intValue())
@@ -249,6 +233,38 @@ object ParquetReadSupport extends Logging {
     }
   }

+  private def clipParquetTypeDefault(
+      parquetType: Type,
+      catalystType: DataType,
+      caseSensitive: Boolean,
+      useFieldId: Boolean,
+      returnNullStructIfAllFieldsMissing: Boolean): Type = catalystType match {
+    case t: ArrayType if ParquetSchemaConverter.isComplexType(t.elementType) =>
+      // Only clips array types with nested type as element type.
+      clipParquetListType(parquetType.asGroupType(), t.elementType, caseSensitive, useFieldId,
+        returnNullStructIfAllFieldsMissing)
+
+    case t: MapType
+        if ParquetSchemaConverter.isComplexType(t.keyType) ||
+          ParquetSchemaConverter.isComplexType(t.valueType) =>
+      // Only clips map types with nested key type or value type
+      clipParquetMapType(
+        parquetType.asGroupType(), t.keyType, t.valueType, caseSensitive, useFieldId,
+        returnNullStructIfAllFieldsMissing)
+
+    case t: StructType if VariantMetadata.isVariantStruct(t) =>
+      clipVariantSchema(parquetType.asGroupType(), t, returnNullStructIfAllFieldsMissing)
+
+    case t: StructType =>
+      clipParquetGroup(parquetType.asGroupType(), t, caseSensitive, useFieldId,
+        returnNullStructIfAllFieldsMissing)
+
+    case _ =>
+      // UDTs and primitive types are not clipped. For UDTs, a clipped version might not be able
+      // to be mapped to desired user-space types. So UDTs shouldn't participate schema merging.
+      parquetType
+  }
+
  /**
   * Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[ArrayType]]. The element type
   * of the [[ArrayType]] should also be a nested type, namely an [[ArrayType]], a [[MapType]], or a
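clipParquetType's framework branch is deliberately narrower than the others: the ParquetTypeOps lookup is flatMapped through parquetStructSchema, so both "not a framework type" (None from the lookup) and "framework type without a struct schema" (None from the field) collapse into the same getOrElse fallback. A small sketch of that two-level Option collapse; the type names and lookup table are illustrative.

// Sketch: two Option layers collapse into one fallback via flatMap.
object ClipDispatchSketch {
  final case class StructSchema(fields: Seq[String])
  final case class TypeOps(parquetStructSchema: Option[StructSchema])

  // Hypothetical lookup: some framework types are struct-backed, some are not.
  private def lookup(catalystType: String): Option[TypeOps] = catalystType match {
    case "structBackedType"    => Some(TypeOps(Some(StructSchema(Seq("metadata", "value")))))
    case "primitiveBackedType" => Some(TypeOps(None)) // framework-owned, no struct schema
    case _                     => None                // not framework-owned at all
  }

  def clipParquetType(catalystType: String): String =
    lookup(catalystType)
      .flatMap(_.parquetStructSchema)       // None in both miss cases above
      .map(st => s"clipGroup(${st.fields.mkString(",")})")
      .getOrElse(clipParquetTypeDefault(catalystType))

  private def clipParquetTypeDefault(catalystType: String): String = "legacy clipping"
}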

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRowConverter.scala

Lines changed: 15 additions & 0 deletions

@@ -41,6 +41,7 @@ import org.apache.spark.sql.catalyst.util.ResolveDefaultColumns._
 import org.apache.spark.sql.errors.QueryCompilationErrors
 import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.execution.datasources.{DataSourceUtils, VariantMetadata}
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.{GeographyVal, GeometryVal, UTF8String, VariantVal}
@@ -306,6 +307,20 @@ private[parquet] class ParquetRowConverter(
       parquetType: Type,
       catalystType: DataType,
       updater: ParentContainerUpdater): Converter with HasParentContainerUpdater = {
+    // Types Framework: framework FIRST, original match as fallback.
+    // Passes all ParquetRowConverter constructor params to the extended newConverter overload
+    // so struct-backed types can create recursive converters.
+    ParquetTypeOps(catalystType)
+      .map(_.newConverter(
+        parquetType, updater, schemaConverter, convertTz,
+        datetimeRebaseSpec, int96RebaseSpec))
+      .getOrElse(newConverterDefault(parquetType, catalystType, updater))
+  }
+
+  private def newConverterDefault(
+      parquetType: Type,
+      catalystType: DataType,
+      updater: ParentContainerUpdater): Converter with HasParentContainerUpdater = {

     def isUnsignedIntTypeMatched(bitWidth: Int): Boolean = {
       parquetType.getLogicalTypeAnnotation match {
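The converter hook differs from the other Scala call sites in what it threads through: besides the parquetType and updater, it hands the framework the reader's schemaConverter, convertTz, and both rebase specs, which is what lets a struct-backed framework type construct nested row converters recursively. Below is a signature-level sketch of that shape; the parameter names echo the diff, but the trait itself is an inference, not the real API.

// Inferred, signature-level sketch of the converter hook (not the real API).
object ConverterDispatchSketch {
  trait Converter
  final class DefaultConverter extends Converter
  final case class RebaseSpec(mode: String)

  trait FrameworkTypeOps {
    // Receives the reader's context so nested row converters can be built recursively.
    def newConverter(
        parquetType: String,
        convertTz: Option[String],
        datetimeRebaseSpec: RebaseSpec,
        int96RebaseSpec: RebaseSpec): Converter
  }

  def newConverter(
      lookup: Option[FrameworkTypeOps],
      parquetType: String,
      datetimeRebaseSpec: RebaseSpec,
      int96RebaseSpec: RebaseSpec): Converter =
    lookup
      .map(_.newConverter(parquetType, None, datetimeRebaseSpec, int96RebaseSpec))
      .getOrElse(new DefaultConverter) // stand-in for newConverterDefault
}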

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaConverter.scala

Lines changed: 19 additions & 4 deletions

@@ -30,6 +30,7 @@ import org.apache.parquet.schema.Type.Repetition._
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.errors.QueryCompilationErrors
 import org.apache.spark.sql.execution.datasources.VariantMetadata
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.types.{EdgeInterpolationAlgorithm => SparkEdgeInterpolationAlgorithm}
@@ -331,7 +332,9 @@ class ParquetToSparkSchemaConverter(
             if time.getUnit == TimeUnit.MICROS && !time.isAdjustedToUTC =>
           TimeType(TimeType.MICROS_PRECISION)
         case _ =>
-          illegalType()
+          // Types Framework: reverse lookup for framework types.
+          ParquetTypeOps.fromParquetPrimitive(INT64, typeAnnotation)
+            .getOrElse(illegalType())
       }

     case INT96 =>
@@ -501,7 +504,13 @@
             valueContainsNull = valueOptional),
           groupColumn, Seq(convertedKey, convertedValue))
       case _ =>
-        throw QueryCompilationErrors.unrecognizedParquetTypeError(field.toString)
+        // Types Framework: reverse lookup for framework group types.
+        ParquetTypeOps.fromParquetGroup(
+          field.getLogicalTypeAnnotation
+        ).map { dt =>
+          ParquetColumn(dt, groupColumn,
+            (0 until groupColumn.getChildrenCount).map(i => convertField(groupColumn.getChild(i))))
+        }.getOrElse(throw QueryCompilationErrors.unrecognizedParquetTypeError(field.toString))
     }
   }

@@ -644,8 +653,15 @@ class SparkToParquetSchemaConverter(
       field: StructField,
       repetition: Type.Repetition,
       inShredded: Boolean): Type = {
+    // Types Framework: framework FIRST, original match as fallback.
+    ParquetTypeOps(field.dataType).map(_.convertToParquetType(field.name, repetition))
+      .getOrElse(convertFieldDefault(field, repetition, inShredded))
+  }

-    field.dataType match {
+  private def convertFieldDefault(
+      field: StructField,
+      repetition: Type.Repetition,
+      inShredded: Boolean): Type = field.dataType match {
     // ===================
     // Simple atomic types
     // ===================
@@ -916,7 +932,6 @@
     case _ =>
       throw QueryCompilationErrors.cannotConvertDataTypeToParquetTypeError(field)
   }
-  }
 }

 private[sql] object ParquetSchemaConverter {
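ParquetSchemaConverter adds the reverse direction: where an unrecognized INT64 annotation used to end in illegalType(), and an unrecognized group annotation in unrecognizedParquetTypeError, the converter now first asks the framework whether the annotation maps to one of its Catalyst types. A sketch of such an annotation-keyed reverse lookup; the keys and type names are illustrative.

// Sketch of an annotation-keyed reverse lookup (keys and names illustrative).
object ReverseLookupSketch {
  // (Parquet primitive, logical annotation) -> framework Catalyst type name.
  private val primitives: Map[(String, String), String] =
    Map(("INT64", "frameworkAnnotation") -> "FrameworkType")

  def fromParquetPrimitive(prim: String, annotation: String): Option[String] =
    primitives.get((prim, annotation))

  // A framework miss still ends in the original error path.
  def convertInt64(annotation: String): String =
    fromParquetPrimitive("INT64", annotation)
      .getOrElse(illegalType(annotation))

  private def illegalType(annotation: String): Nothing =
    throw new IllegalArgumentException(s"Illegal Parquet type: INT64 ($annotation)")
}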

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetUtils.scala

Lines changed: 7 additions & 1 deletion

@@ -42,6 +42,7 @@ import org.apache.spark.sql.catalyst.expressions.variant.VariantExpressionEvalUtils
 import org.apache.spark.sql.catalyst.util.RebaseDateTime.RebaseSpec
 import org.apache.spark.sql.connector.expressions.aggregate.{Aggregation, Count, CountStar, Max, Min}
 import org.apache.spark.sql.execution.datasources.{AggregatePushDownUtils, OutputWriter, OutputWriterFactory}
+import org.apache.spark.sql.execution.datasources.parquet.types.ops.ParquetTypeOps
 import org.apache.spark.sql.execution.datasources.v2.V2ColumnUtils
 import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 import org.apache.spark.sql.internal.SQLConf.PARQUET_AGGREGATE_PUSHDOWN_ENABLED
@@ -206,7 +207,12 @@ object ParquetUtils extends Logging {
     sqlConf.parquetVectorizedReaderEnabled &&
       schema.forall(f => isBatchReadSupported(sqlConf, f.dataType))

-  def isBatchReadSupported(sqlConf: SQLConf, dt: DataType): Boolean = dt match {
+  def isBatchReadSupported(sqlConf: SQLConf, dt: DataType): Boolean =
+    // Types Framework: framework FIRST, original match as fallback.
+    ParquetTypeOps(dt).map(_.isBatchReadSupported(sqlConf))
+      .getOrElse(isBatchReadSupportedDefault(sqlConf, dt))
+
+  private def isBatchReadSupportedDefault(sqlConf: SQLConf, dt: DataType): Boolean = dt match {
     case _: AtomicType =>
       true
     case _: NullType =>
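Taken together, the call sites across these eight files imply a per-type ops object plus a companion registry with forward and reverse lookups. The sketch below simply collects those hooks in one place as inferred from the diff; it is a reconstruction, not the real ParquetTypeOps source, whose definitions presumably live in the three changed files this listing does not show.

// Inferred surface of the types framework, reconstructed purely from the
// call sites in this commit; each hook is annotated with the file using it.
object InferredTypesFrameworkSurface {
  trait ParquetTypeOpsLike {
    def supportDataType: Boolean                         // ParquetFileFormat
    def isBatchReadSupported(conf: AnyRef): Boolean      // ParquetUtils
    def parquetStructSchema: Option[AnyRef]              // ParquetReadSupport
    def convertToParquetType(name: String, repetition: AnyRef): AnyRef // SparkToParquetSchemaConverter
    def isFilterableValue(value: Any): Boolean           // ParquetFilters
  }

  trait ParquetTypeOpsRegistryLike {
    // Forward lookup by Catalyst type; None = framework does not own the type.
    def apply(catalystType: AnyRef): Option[ParquetTypeOpsLike]
    // Reverse lookups by Parquet annotation (ParquetToSparkSchemaConverter).
    def fromParquetPrimitive(primitive: AnyRef, annotation: AnyRef): Option[AnyRef]
    def fromParquetGroup(annotation: AnyRef): Option[AnyRef]
  }
}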
