Add Spark SQL integration test for Hudi #3194
base: main
New file: `SparkHudiIT.java` (182 lines added):

```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.polaris.spark.quarkus.it;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import io.quarkus.test.junit.QuarkusIntegrationTest;
import java.io.File;
import java.nio.file.Path;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.polaris.service.it.env.IntegrationTestsHelper;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

@QuarkusIntegrationTest
public class SparkHudiIT extends SparkIntegrationBase {

  @Override
  protected SparkSession buildSparkSession() {
    return SparkSession.builder()
        .master("local[1]")
        .config("spark.ui.showConsoleProgress", "false")
        .config("spark.ui.enabled", "false")
        .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
        .config(
            "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.hudi.catalog.HoodieCatalog")
        .config(
            String.format("spark.sql.catalog.%s", catalogName),
            "org.apache.polaris.spark.SparkCatalog")
        .config("spark.sql.warehouse.dir", warehouseDir.toString())
        .config(String.format("spark.sql.catalog.%s.type", catalogName), "rest")
        .config(
            String.format("spark.sql.catalog.%s.uri", catalogName),
            endpoints.catalogApiEndpoint().toString())
        .config(String.format("spark.sql.catalog.%s.warehouse", catalogName), catalogName)
        .config(String.format("spark.sql.catalog.%s.scope", catalogName), "PRINCIPAL_ROLE:ALL")
        .config(
            String.format("spark.sql.catalog.%s.header.realm", catalogName), endpoints.realmId())
        .config(String.format("spark.sql.catalog.%s.token", catalogName), sparkToken)
        .config(String.format("spark.sql.catalog.%s.s3.access-key-id", catalogName), "fakekey")
        .config(
            String.format("spark.sql.catalog.%s.s3.secret-access-key", catalogName), "fakesecret")
        .config(String.format("spark.sql.catalog.%s.s3.region", catalogName), "us-west-2")
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .config("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar")
        // disabled for the initial integration test; revisit enabling it in the future
        .config("hoodie.metadata.enable", "false")
        .getOrCreate();
  }

  private String defaultNs;
  private String tableRootDir;

  private String getTableLocation(String tableName) {
    return String.format("%s/%s", tableRootDir, tableName);
  }

  private String getTableNameWithRandomSuffix() {
    return generateName("huditb");
  }

  @BeforeEach
  public void createDefaultResources(@TempDir Path tempDir) {
    spark.sparkContext().setLogLevel("INFO");
    defaultNs = generateName("hudi");
    // create a default namespace
    sql("CREATE NAMESPACE %s", defaultNs);
    sql("USE NAMESPACE %s", defaultNs);
    tableRootDir =
        IntegrationTestsHelper.getTemporaryDirectory(tempDir).resolve(defaultNs).getPath();
  }

  @AfterEach
  public void cleanupHudiData() {
    // clean up hudi data
    if (tableRootDir != null) {
      File dirToDelete = new File(tableRootDir);
      FileUtils.deleteQuietly(dirToDelete);
    }
    if (defaultNs != null) {
      sql("DROP NAMESPACE %s", defaultNs);
    }
  }

  @Test
  public void testBasicTableOperations() {
    // create a regular hudi table
    String huditb1 = "huditb1";
    sql(
        "CREATE TABLE %s (id INT, name STRING) USING HUDI LOCATION '%s'",
        huditb1, getTableLocation(huditb1));
    sql("INSERT INTO %s VALUES (1, 'anna'), (2, 'bob')", huditb1);
    List<Object[]> results = sql("SELECT id,name FROM %s WHERE id > 1 ORDER BY id DESC", huditb1);
    assertThat(results.size()).isEqualTo(1);
    assertThat(results.get(0)).isEqualTo(new Object[] {2, "bob"});

    // create a hudi table with partition
    String huditb2 = "huditb2";
    sql(
        "CREATE TABLE %s (name STRING, age INT, country STRING) USING HUDI PARTITIONED BY (country) LOCATION '%s'",
        huditb2, getTableLocation(huditb2));
    sql(
        "INSERT INTO %s VALUES ('anna', 10, 'US'), ('james', 32, 'US'), ('yan', 16, 'CHINA')",
        huditb2);
    results = sql("SELECT name, country FROM %s ORDER BY age", huditb2);
    assertThat(results.size()).isEqualTo(3);
    assertThat(results.get(0)).isEqualTo(new Object[] {"anna", "US"});
    assertThat(results.get(1)).isEqualTo(new Object[] {"yan", "CHINA"});
    assertThat(results.get(2)).isEqualTo(new Object[] {"james", "US"});

    // verify the partition dir is created
    List<String> subDirs = listDirs(getTableLocation(huditb2));
    assertThat(subDirs).contains(".hoodie", "country=CHINA", "country=US");

    // test listTables
    List<Object[]> tables = sql("SHOW TABLES");
    assertThat(tables.size()).isEqualTo(2);
    assertThat(tables)
        .contains(
            new Object[] {defaultNs, huditb1, false}, new Object[] {defaultNs, huditb2, false});

    sql("DROP TABLE %s", huditb1);
    sql("DROP TABLE %s", huditb2);
    tables = sql("SHOW TABLES");
    assertThat(tables.size()).isEqualTo(0);
  }

  @Test
  public void testUnsupportedAlterTableOperations() {
    String huditb = getTableNameWithRandomSuffix();
    sql(
        "CREATE TABLE %s (name STRING, age INT, country STRING) USING HUDI PARTITIONED BY (country) LOCATION '%s'",
        huditb, getTableLocation(huditb));

    // ALTER TABLE ... RENAME TO ... fails
    assertThatThrownBy(() -> sql("ALTER TABLE %s RENAME TO new_hudi", huditb))
        .isInstanceOf(UnsupportedOperationException.class);

    // ALTER TABLE ... SET LOCATION ... fails
    assertThatThrownBy(() -> sql("ALTER TABLE %s SET LOCATION '/tmp/new/path'", huditb))
        .isInstanceOf(UnsupportedOperationException.class);

    sql("DROP TABLE %s", huditb);
  }

  @Test
  public void testUnsupportedTableCreateOperations() {
    String huditb = getTableNameWithRandomSuffix();
    // create hudi table with no location
    assertThatThrownBy(() -> sql("CREATE TABLE %s (id INT, name STRING) USING HUDI", huditb))
        .isInstanceOf(UnsupportedOperationException.class);

    // CTAS fails
    assertThatThrownBy(
            () ->
                sql(
                    "CREATE TABLE %s USING HUDI LOCATION '%s' AS SELECT 1 AS id",
                    huditb, getTableLocation(huditb)))
        .isInstanceOf(IllegalArgumentException.class);
  }
}
```
Second changed file (a Gradle build script; the path is not shown in this view):

```diff
@@ -46,6 +46,7 @@ dependencies {
     // TODO: extract a polaris-rest module as a thin layer for
     // client to depends on.
     implementation(project(":polaris-core")) { isTransitive = false }
+    testImplementation("org.apache.hudi:hudi-spark3.5-bundle_${scalaVersion}:1.1.0")
```
**Contributor:** Nit: we put versions in the file
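A minimal sketch of that nit, assuming the project centralizes dependency versions somewhere in the build (the comment doesn't name the specific file, so the `hudiVersion` name and its placement here are illustrative, not the project's actual convention):

```kotlin
dependencies {
  // Hypothetical: hoist the hard-coded "1.1.0" behind a named version so it can
  // live alongside the project's other pinned versions. `hudiVersion` is an
  // assumed name; `scalaVersion` already exists in this build script.
  val hudiVersion = "1.1.0"
  testImplementation("org.apache.hudi:hudi-spark3.5-bundle_${scalaVersion}:$hudiVersion")
}
```

If the build instead keeps a Gradle version catalog, the pin would move into that catalog and be referenced through a library alias; either way the goal is a single place to bump the Hudi version.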
**Contributor:** For the actual Spark project, we don't really intend to introduce any table-format-specific dependency, even for testing. I didn't see any change in the actual Spark project; is there a reason we need this?
The diff continues:

```diff
     implementation(
         "org.apache.iceberg:iceberg-spark-runtime-${sparkMajorVersion}_${scalaVersion}:${icebergVersion}"
```
**Reviewer:** @rahil-c could you also update the README to include the support for Hudi? It would be great if we could also have a notebook in the get-started to help people onboard with Hudi; we could do that in a follow-up. We should also extend the regression tests to include an actual end-to-end test for Hudi, to avoid any potential breakage of the feature.