From e2ca2c12fab659529f3764fb5f5f0d6481ade3ac Mon Sep 17 00:00:00 2001
From: Maciej Olko
Date: Thu, 26 Mar 2026 16:14:15 +0100
Subject: [PATCH] Add Spark 4.0 support via deequ:2.0.14-spark-4.0

- Add "4.0" entry to SPARK_TO_DEEQU_COORD_MAPPING in configs.py
- Widen pyspark optional dep bound to <5.0.0 in pyproject.toml
- Replace scala.collection.JavaConversions (removed in Scala 2.13) with
  JavaConverters in scala_utils.py and profiles.py
- Replace scala.collection.Seq.empty() (inaccessible via Py4J in Scala 2.13)
  with to_scala_seq(jvm, jvm.java.util.ArrayList()) in analyzers.py and checks.py
- Add Spark 4.0.0 to CI matrix with Java 17; use include: style to pair
  each Spark version with its required Java version

Fixes #258

Co-Authored-By: Claude Sonnet 4.6
---
 .github/workflows/base.yml | 17 +++++++++++++----
 pydeequ/analyzers.py       |  2 +-
 pydeequ/checks.py          |  2 +-
 pydeequ/configs.py         |  1 +
 pydeequ/profiles.py        |  2 +-
 pydeequ/scala_utils.py     |  6 +++---
 pyproject.toml             |  2 +-
 7 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/base.yml b/.github/workflows/base.yml
index d5e4ec7..cad9f4a 100644
--- a/.github/workflows/base.yml
+++ b/.github/workflows/base.yml
@@ -12,7 +12,17 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        PYSPARK_VERSION: ["3.1.3", "3.2", "3.3", "3.5"]
+        include:
+          - PYSPARK_VERSION: "3.1.3"
+            JAVA_VERSION: "11"
+          - PYSPARK_VERSION: "3.2"
+            JAVA_VERSION: "11"
+          - PYSPARK_VERSION: "3.3"
+            JAVA_VERSION: "11"
+          - PYSPARK_VERSION: "3.5"
+            JAVA_VERSION: "11"
+          - PYSPARK_VERSION: "4.0.0"
+            JAVA_VERSION: "17"
 
     steps:
       - uses: actions/checkout@v3
@@ -23,10 +33,9 @@
           python-version: 3.8
 
       - uses: actions/setup-java@v1
-        name: Setup Java 11
-        if: startsWith(matrix.PYSPARK_VERSION, '3')
+        name: Setup Java ${{matrix.JAVA_VERSION}}
         with:
-          java-version: "11"
+          java-version: ${{matrix.JAVA_VERSION}}
 
       - name: Running tests with pyspark==${{matrix.PYSPARK_VERSION}}
         env:
diff --git a/pydeequ/analyzers.py b/pydeequ/analyzers.py
index 3952c93..25b1072 100644
--- a/pydeequ/analyzers.py
+++ b/pydeequ/analyzers.py
@@ -311,7 +311,7 @@ def _analyzer_jvm(self):
             self.instance,
             self.predicate,
             self._jvm.scala.Option.apply(self.where),
-            self._jvm.scala.collection.Seq.empty(),
+            to_scala_seq(self._jvm, self._jvm.java.util.ArrayList()),
             self._jvm.scala.Option.apply(None)
         )
 
diff --git a/pydeequ/checks.py b/pydeequ/checks.py
index 749f74d..cdd6032 100644
--- a/pydeequ/checks.py
+++ b/pydeequ/checks.py
@@ -563,7 +563,7 @@ def satisfies(self, columnCondition, constraintName, assertion=None, hint=None):
             constraintName,
             assertion_func,
             hint,
-            self._jvm.scala.collection.Seq.empty(),
+            to_scala_seq(self._jvm, self._jvm.java.util.ArrayList()),
             self._jvm.scala.Option.apply(None)
         )
         return self
diff --git a/pydeequ/configs.py b/pydeequ/configs.py
index e56c97d..60c3403 100644
--- a/pydeequ/configs.py
+++ b/pydeequ/configs.py
@@ -5,6 +5,7 @@
 
 
 SPARK_TO_DEEQU_COORD_MAPPING = {
+    "4.0": "com.amazon.deequ:deequ:2.0.14-spark-4.0",
     "3.5": "com.amazon.deequ:deequ:2.0.8-spark-3.5",
     "3.3": "com.amazon.deequ:deequ:2.0.8-spark-3.3",
     "3.2": "com.amazon.deequ:deequ:2.0.8-spark-3.2",
diff --git a/pydeequ/profiles.py b/pydeequ/profiles.py
index fbbfd84..c82dca2 100644
--- a/pydeequ/profiles.py
+++ b/pydeequ/profiles.py
@@ -254,7 +254,7 @@ def _columnProfilesFromColumnRunBuilderRun(self, run):
         :return: a setter for columnProfilerRunner result
         """
         self._run_result = run
-        profile_map = self._jvm.scala.collection.JavaConversions.mapAsJavaMap(run.profiles())  # TODO from ScalaUtils
+        profile_map = self._jvm.scala.collection.JavaConverters.mapAsJavaMapConverter(run.profiles()).asJava()  # TODO from ScalaUtils
         self._profiles = {column: self._columnProfileBuilder(column, profile_map[column]) for column in profile_map}
         return self
 
diff --git a/pydeequ/scala_utils.py b/pydeequ/scala_utils.py
index b6d3e83..5aa16ba 100644
--- a/pydeequ/scala_utils.py
+++ b/pydeequ/scala_utils.py
@@ -77,7 +77,7 @@ def to_scala_seq(jvm, iterable):
     Returns:
         Scala sequence
     """
-    return jvm.scala.collection.JavaConversions.iterableAsScalaIterable(iterable).toSeq()
+    return jvm.scala.collection.JavaConverters.iterableAsScalaIterableConverter(iterable).asScala().toSeq()
 
 
 def to_scala_map(spark_session, d):
@@ -93,11 +93,11 @@
 
 
 def scala_map_to_dict(jvm, scala_map):
-    return dict(jvm.scala.collection.JavaConversions.mapAsJavaMap(scala_map))
+    return dict(jvm.scala.collection.JavaConverters.mapAsJavaMapConverter(scala_map).asJava())
 
 
 def scala_map_to_java_map(jvm, scala_map):
-    return jvm.scala.collection.JavaConversions.mapAsJavaMap(scala_map)
+    return jvm.scala.collection.JavaConverters.mapAsJavaMapConverter(scala_map).asJava()
 
 
 def java_list_to_python_list(java_list: str, datatype):
diff --git a/pyproject.toml b/pyproject.toml
index dcb6a11..9abcce5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,7 +31,7 @@ classifiers = [
 python = ">=3.8,<4"
 numpy = ">=1.14.1"
 pandas = ">=0.23.0"
-pyspark = { version = ">=2.4.7,<3.4.0", optional = true }
+pyspark = { version = ">=2.4.7,<5.0.0", optional = true }
 
 [tool.poetry.dev-dependencies]
 pytest = "^6.2.4"
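-- 
Illustrative note (not part of the patch, placed after the signature delimiter
so git am ignores it): the new "4.0" key in SPARK_TO_DEEQU_COORD_MAPPING is
looked up by the major.minor prefix of the installed pyspark version, so
pyspark 4.0.x resolves to the deequ:2.0.14-spark-4.0 artifact. The sketch
below shows that lookup idea in isolation; the helper name, error handling,
and truncated mapping are assumptions for illustration, not pydeequ's actual
implementation.

    # Hypothetical, self-contained sketch of the version-to-coordinate lookup.
    SPARK_TO_DEEQU_COORD_MAPPING = {
        "4.0": "com.amazon.deequ:deequ:2.0.14-spark-4.0",
        "3.5": "com.amazon.deequ:deequ:2.0.8-spark-3.5",
    }

    def deequ_coord_for(spark_version: str) -> str:
        # Reduce a full version such as "4.0.0" to its "4.0" major.minor key.
        key = ".".join(spark_version.split(".")[:2])
        if key not in SPARK_TO_DEEQU_COORD_MAPPING:
            raise RuntimeError(f"No deequ coordinate mapped for Spark {spark_version}")
        return SPARK_TO_DEEQU_COORD_MAPPING[key]

    assert deequ_coord_for("4.0.0") == "com.amazon.deequ:deequ:2.0.14-spark-4.0"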