Binary file modified docs/static/img/chain-table.png
ChainTableUtils.java (org.apache.paimon.utils)
@@ -25,6 +25,11 @@
import org.apache.paimon.partition.PartitionTimeExtractor;
import org.apache.paimon.predicate.Predicate;
import org.apache.paimon.predicate.PredicateBuilder;
import org.apache.paimon.table.ChainGroupReadTable;
import org.apache.paimon.table.FallbackReadFileStoreTable;
import org.apache.paimon.table.FileStoreTable;
import org.apache.paimon.table.sink.BatchTableCommit;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.types.RowType;

import java.time.LocalDateTime;
@@ -204,4 +209,49 @@ public static LinkedHashMap<String, String> calPartValues(
}
return res;
}

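    /**
     * After an overwrite commit on a chain table's delta branch, truncate the
     * overwritten partitions in the snapshot branch so chained reads do not
     * return stale snapshot data for those partitions.
     */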
public static void postCommit(
FileStoreTable table, boolean overwrite, List<CommitMessage> commitMessages) {
if (overwrite) {
FileStoreTable candidateTbl = table;
if (table instanceof FallbackReadFileStoreTable) {
candidateTbl =
((ChainGroupReadTable) ((FallbackReadFileStoreTable) table).fallback())
.wrapped();
}
FileStoreTable snapshotTbl =
candidateTbl.switchToBranch(table.coreOptions().scanFallbackSnapshotBranch());
InternalRowPartitionComputer partitionComputer =
new InternalRowPartitionComputer(
table.coreOptions().partitionDefaultName(),
table.schema().logicalPartitionType(),
table.schema().partitionKeys().toArray(new String[0]),
table.coreOptions().legacyPartitionName());
List<BinaryRow> overwritePartitions =
commitMessages.stream()
.map(CommitMessage::partition)
.distinct()
.collect(Collectors.toList());
if (!overwritePartitions.isEmpty()) {
List<Map<String, String>> candidatePartitions =
overwritePartitions.stream()
.map(partition -> partitionComputer.generatePartValues(partition))
.collect(Collectors.toList());
try (BatchTableCommit commit = snapshotTbl.newBatchWriteBuilder().newCommit()) {
commit.truncatePartitions(candidatePartitions);
} catch (Exception e) {
throw new RuntimeException(
String.format(
"Failed to truncate partitions in snapshot table %s.",
candidatePartitions),
e);
}
}
}
}

public static boolean chainScanFallbackDeltaBranch(CoreOptions options) {
return options.isChainTable()
&& options.scanFallbackDeltaBranch().equalsIgnoreCase(options.branch());
}
}
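
Together, these two helpers form the delta-branch overwrite hook: chainScanFallbackDeltaBranch decides whether the hook applies (a chain table whose current branch is the configured delta fallback branch), and postCommit unwraps the fallback-read table, switches to the snapshot branch, and truncates every partition touched by the overwrite. A minimal caller sketch, with the hypothetical class name ChainOverwriteHook as an assumption (the Spark writer in the next file wires the helpers up the same way):

import org.apache.paimon.CoreOptions;
import org.apache.paimon.table.FileStoreTable;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.utils.ChainTableUtils;

import java.util.List;

// Hypothetical call site (not part of this PR) showing how the two helpers compose.
public class ChainOverwriteHook {

    static void afterCommit(
            FileStoreTable table, boolean overwrite, List<CommitMessage> messages) {
        CoreOptions options = table.coreOptions();
        // Only chain tables currently writing to the configured delta fallback
        // branch need the snapshot branch mirrored.
        if (ChainTableUtils.chainScanFallbackDeltaBranch(options)) {
            // For overwrite commits, drop the overwritten partitions from the
            // snapshot branch so chained reads do not serve stale data.
            ChainTableUtils.postCommit(table, overwrite, messages);
        }
    }
}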
WriteIntoPaimonTable.scala
@@ -23,6 +23,8 @@ import org.apache.paimon.options.Options
import org.apache.paimon.spark._
import org.apache.paimon.spark.catalyst.analysis.expressions.ExpressionHelper
import org.apache.paimon.table.FileStoreTable
import org.apache.paimon.table.sink.CommitMessage
import org.apache.paimon.utils.ChainTableUtils

import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
@@ -57,7 +59,7 @@ case class WriteIntoPaimonTable(
}
val commitMessages = writer.write(data)
writer.commit(commitMessages)

postCommit(dynamicPartitionOverwriteMode, overwritePartition, commitMessages)
Seq.empty
}

@@ -82,6 +84,18 @@
(dynamicPartitionOverwriteMode, overwritePartition)
}

private def postCommit(
dynamicPartitionOverwrite: Boolean,
staticOverwritePartition: Map[String, String],
commitMessages: Seq[CommitMessage]): Unit = {
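    // Treat the write as an overwrite when dynamic partition overwrite is
    // enabled or a static overwrite partition spec was supplied; either way,
    // the overwritten partitions are also truncated on the snapshot branch.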
if (ChainTableUtils.chainScanFallbackDeltaBranch(table.coreOptions())) {
ChainTableUtils.postCommit(
table,
dynamicPartitionOverwrite || staticOverwritePartition != null,
commitMessages.toList.asJava)
}
}

override def withNewChildrenInternal(newChildren: IndexedSeq[LogicalPlan]): LogicalPlan =
this.asInstanceOf[WriteIntoPaimonTable]
}
Chain table Spark integration test (Java)
@@ -23,6 +23,7 @@
import org.apache.paimon.hive.TestHiveMetastore;
import org.apache.paimon.table.FileStoreTableFactory;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterAll;
@@ -380,6 +381,24 @@ public void testChainTable(@TempDir java.nio.file.Path tempDir) throws IOException
spark.close();
spark = builder.getOrCreate();

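        // Overwrite a partition on the delta branch; the post-commit hook
        // should truncate the same partition on the snapshot branch.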
spark.sql("set spark.paimon.branch=delta;");
spark.sql(
"insert overwrite table `my_db1`.`chain_test` values (5, 2, '1', '20250813'),(6, 2, '1', '20250814');");

spark.close();
spark = builder.getOrCreate();
Dataset<Row> df =
spark.sql(
"SELECT t1,t2,t3 FROM `my_db1`.`chain_test$branch_snapshot` where dt = '20250814'");
assertThat(df.count()).isEqualTo(0);
df =
spark.sql(
"SELECT t1,t2,t3 FROM `my_db1`.`chain_test$branch_delta` where dt = '20250814'");
assertThat(df.count()).isEqualTo(1);

spark.close();
spark = builder.getOrCreate();

/** Drop table */
spark.sql("DROP TABLE IF EXISTS `my_db1`.`chain_test`;");

@@ -593,6 +612,25 @@ public void testHourlyChainTable(@TempDir java.nio.file.Path tempDir) throws IOException
spark.close();
spark = builder.getOrCreate();

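        // Same check for the hourly chain table: overwriting on the delta
        // branch should empty the matching (dt, hour) partition on the
        // snapshot branch.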
spark.sql("set spark.paimon.branch=delta;");
spark.sql(
"insert overwrite table `my_db1`.`chain_test` values (6, 2, '1', '20250811', '02');");

spark.close();
spark = builder.getOrCreate();

Dataset<Row> df =
spark.sql(
"SELECT t1,t2,t3 FROM `my_db1`.`chain_test$branch_snapshot` where dt = '20250811' and hour = '02'");
assertThat(df.count()).isEqualTo(0);
df =
spark.sql(
"SELECT t1,t2,t3 FROM `my_db1`.`chain_test$branch_delta` where dt = '20250811' and hour = '02'");
assertThat(df.count()).isEqualTo(1);

spark.close();
spark = builder.getOrCreate();

/** Drop table */
spark.sql("DROP TABLE IF EXISTS `my_db1`.`chain_test`;");
