Hive: Switch tests to JUnit5 (apache#8058)
coded9 authored Jul 15, 2023
1 parent 981f1a0 commit 3caa3a2
Showing 14 changed files with 537 additions and 515 deletions.
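
For orientation: the changed files apply one migration pattern. @Before/@After become @BeforeEach/@AfterEach, @BeforeClass/@AfterClass become @BeforeAll/@AfterAll, the JUnit 4 TemporaryFolder rule becomes a @TempDir java.nio.file.Path, and org.junit.Assert calls are rewritten as AssertJ assertThat/assertThatThrownBy chains. The build.gradle hunk adds useJUnitPlatform() so Gradle's test task runs on the JUnit Platform; without it Gradle defaults to the JUnit 4 runner and does not discover Jupiter tests. Below is a minimal illustrative sketch of the per-test lifecycle half of that pattern; it is not code from this commit, and the class and member names are hypothetical.

import static org.assertj.core.api.Assertions.assertThat;

import java.nio.file.Path;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class ExampleLifecycleTest {

  // Replaces the JUnit 4 rule: @Rule public TemporaryFolder temp = new TemporaryFolder();
  // JUnit injects a fresh temporary directory before each test.
  @TempDir Path temp;

  private String tableLocation;

  @BeforeEach // replaces @Before
  void createTableLocation() {
    // TemporaryFolder.newFolder("hive-").getPath() becomes a Path-based resolve
    tableLocation = temp.resolve("hive-").toString();
  }

  @AfterEach // replaces @After
  void cleanup() {
    // per-test cleanup, e.g. dropping a table created by the test
    tableLocation = null;
  }

  @Test
  void tableLocationIsSetPerTest() {
    assertThat(tableLocation).as("location should point into the temp dir").endsWith("hive-");
  }
}
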
4 changes: 4 additions & 0 deletions build.gradle
@@ -624,6 +624,10 @@ project(':iceberg-gcp') {
}

project(':iceberg-hive-metastore') {
test {
useJUnitPlatform()
}

dependencies {
implementation project(path: ':iceberg-bundled-guava', configuration: 'shadow')
implementation project(':iceberg-core')
@@ -20,8 +20,11 @@

import static org.apache.iceberg.PartitionSpec.builderFor;
import static org.apache.iceberg.types.Types.NestedField.required;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import java.io.IOException;
import java.nio.file.Path;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
@@ -33,16 +36,12 @@
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.assertj.core.api.Assertions;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

public class HiveCreateReplaceTableTest extends HiveMetastoreTest {

@@ -53,58 +52,58 @@ public class HiveCreateReplaceTableTest extends HiveMetastoreTest {
required(3, "id", Types.IntegerType.get()), required(4, "data", Types.StringType.get()));
private static final PartitionSpec SPEC = builderFor(SCHEMA).identity("id").build();

@Rule public TemporaryFolder temp = new TemporaryFolder();
@TempDir private Path temp;

private String tableLocation;

@Before
@BeforeEach
public void createTableLocation() throws IOException {
tableLocation = temp.newFolder("hive-").getPath();
tableLocation = temp.resolve("hive-").toString();
}

@After
@AfterEach
public void cleanup() {
catalog.dropTable(TABLE_IDENTIFIER);
}

@Test
public void testCreateTableTxn() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn =
catalog.newCreateTableTransaction(
TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
txn.updateProperties().set("prop", "value").commit();

// verify the table is still not visible before the transaction is committed
Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).isFalse();

txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
assertThat(table.properties()).as("Table props should match").containsEntry("prop", "value");
}

@Test
public void testCreateTableTxnTableCreatedConcurrently() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn =
catalog.newCreateTableTransaction(
TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());

// create the table concurrently
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be created").isTrue();

Assertions.assertThatThrownBy(txn::commitTransaction)
assertThatThrownBy(txn::commitTransaction)
.isInstanceOf(AlreadyExistsException.class)
.hasMessage("Table already exists: hivedb.tbl");
}

@Test
public void testCreateTableTxnAndAppend() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn =
catalog.newCreateTableTransaction(
@@ -123,19 +122,20 @@ public void testCreateTableTxnAndAppend() {

Table table = catalog.loadTable(TABLE_IDENTIFIER);
Snapshot snapshot = table.currentSnapshot();
Assert.assertTrue(
"Table should have one manifest file", snapshot.allManifests(table.io()).size() == 1);
assertThat(snapshot.allManifests(table.io()))
.as("Table should have one manifest file")
.hasSize(1);
}

@Test
public void testCreateTableTxnTableAlreadyExists() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

// create a table before starting a transaction
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be created").isTrue();

Assertions.assertThatThrownBy(
assertThatThrownBy(
() ->
catalog.newCreateTableTransaction(
TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap()))
@@ -146,20 +146,22 @@ public void testCreateTableTxnTableAlreadyExists() {
@Test
public void testReplaceTableTxn() {
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should exist").isTrue();

Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, false);
txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
PartitionSpec v1Expected =
PartitionSpec.builderFor(table.schema()).alwaysNull("id", "id").withSpecId(1).build();
Assert.assertEquals("Table should have a spec with one void field", v1Expected, table.spec());
assertThat(table.spec())
.as("Table should have a spec with one void field")
.isEqualTo(v1Expected);
}

@Test
public void testReplaceTableTxnTableNotExists() {
Assertions.assertThatThrownBy(
assertThatThrownBy(
() -> catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false))
.isInstanceOf(NoSuchTableException.class)
.hasMessage("Table does not exist: hivedb.tbl");
@@ -168,15 +170,15 @@ public void testReplaceTableTxnTableNotExists() {
@Test
public void testReplaceTableTxnTableDeletedConcurrently() {
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should exist").isTrue();

Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false);

catalog.dropTable(TABLE_IDENTIFIER);

txn.updateProperties().set("prop", "value").commit();

Assertions.assertThatThrownBy(txn::commitTransaction)
assertThatThrownBy(txn::commitTransaction)
.isInstanceOf(NoSuchTableException.class)
.hasMessage("No such table: hivedb.tbl");
}
@@ -185,7 +187,7 @@ public void testReplaceTableTxnTableDeletedConcurrently() {
public void testReplaceTableTxnTableModifiedConcurrently() {
Table table =
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should exist").isTrue();

Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, false);

@@ -197,42 +199,45 @@ public void testReplaceTableTxnTableModifiedConcurrently() {

// the replace should still succeed
table = catalog.loadTable(TABLE_IDENTIFIER);
Assert.assertNull("Table props should be updated", table.properties().get("another-prop"));
Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
assertThat(table.properties())
.as("Table props should be updated")
.doesNotContainKey("another-prop")
.containsEntry("prop", "value");
}

@Test
public void testCreateOrReplaceTableTxnTableNotExists() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, SPEC, true);
txn.updateProperties().set("prop", "value").commit();
txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
assertThat(table.properties()).as("Table props should match").containsEntry("prop", "value");
}

@Test
public void testCreateOrReplaceTableTxnTableExists() {
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC, tableLocation, Maps.newHashMap());
Assert.assertTrue("Table should exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should exist").isTrue();

Transaction txn = catalog.newReplaceTableTransaction(TABLE_IDENTIFIER, SCHEMA, true);
txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
PartitionSpec v1Expected =
PartitionSpec.builderFor(table.schema()).alwaysNull("id", "id").withSpecId(1).build();
Assert.assertEquals("Table should have a spec with one void field", v1Expected, table.spec());
assertThat(table.spec())
.as("Table should have a spec with one void field")
.isEqualTo(v1Expected);
}

@Test
public void testCreateOrReplaceTableTxnTableDeletedConcurrently() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be created").isTrue();

Transaction txn =
catalog.newReplaceTableTransaction(
@@ -251,12 +256,12 @@ public void testCreateOrReplaceTableTxnTableDeletedConcurrently() {
txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
assertThat(table.properties()).as("Table props should match").containsEntry("prop", "value");
}

@Test
public void testCreateOrReplaceTableTxnTableCreatedConcurrently() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn =
catalog.newReplaceTableTransaction(
@@ -270,19 +275,21 @@ public void testCreateOrReplaceTableTxnTableCreatedConcurrently() {

// create the table concurrently
catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should be created").isTrue();

// expect the transaction to succeed anyway
txn.commitTransaction();

Table table = catalog.loadTable(TABLE_IDENTIFIER);
Assert.assertEquals("Partition spec should match", PartitionSpec.unpartitioned(), table.spec());
Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
assertThat(table.spec())
.as("Partition spec should match")
.isEqualTo(PartitionSpec.unpartitioned());
assertThat(table.properties()).as("Table props should match").containsEntry("prop", "value");
}

@Test
public void testCreateTableTxnWithGlobalTableLocation() {
Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));
assertThat(catalog.tableExists(TABLE_IDENTIFIER)).as("Table should not exist").isFalse();

Transaction txn =
catalog.newCreateTableTransaction(
@@ -300,6 +307,6 @@ public void testCreateTableTxnWithGlobalTableLocation() {

table.newAppend().appendFile(dataFile).commit();

Assert.assertEquals("Write should succeed", 1, Iterables.size(table.snapshots()));
assertThat(table.snapshots()).as("Write should succeed").hasSize(1);
}
}
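
The assertion changes in this file follow a consistent mapping from org.junit.Assert to AssertJ: message-first asserts become assertThat(...).as(...) chains, map and collection checks use containsEntry, doesNotContainKey, and hasSize, and Assertions.assertThatThrownBy keeps its behavior but loses its class prefix through a static import. A compact, illustrative summary of that mapping (hypothetical values, not code from the commit):

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import java.util.List;
import java.util.Map;

class AssertionMappingSketch {

  void mapping(boolean exists, Map<String, String> props, List<String> manifests) {
    // Assert.assertFalse("Table should not exist", exists)
    assertThat(exists).as("Table should not exist").isFalse();

    // Assert.assertEquals("Table props should match", "value", props.get("prop"))
    assertThat(props).as("Table props should match").containsEntry("prop", "value");

    // Assert.assertNull("Table props should be updated", props.get("another-prop"))
    assertThat(props).as("Table props should be updated").doesNotContainKey("another-prop");

    // Assert.assertTrue("Table should have one manifest file", manifests.size() == 1)
    assertThat(manifests).as("Table should have one manifest file").hasSize(1);

    // Assertions.assertThatThrownBy(...) is unchanged apart from the static import
    assertThatThrownBy(() -> { throw new IllegalStateException("boom"); })
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("boom");
  }
}
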
@@ -28,8 +28,8 @@
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;

public abstract class HiveMetastoreTest {

@@ -41,7 +41,7 @@ public abstract class HiveMetastoreTest {
protected static HiveConf hiveConf;
protected static TestHiveMetastore metastore;

@BeforeClass
@BeforeAll
public static void startMetastore() throws Exception {
startMetastore(Collections.emptyMap());
}
@@ -72,7 +72,7 @@ public static void startMetastore(Map<String, String> hiveConfOverride) throws E
hiveConfWithOverrides);
}

@AfterClass
@AfterAll
public static void stopMetastore() throws Exception {
HiveMetastoreTest.catalog = null;

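
For class-level setup such as HiveMetastoreTest above, @BeforeClass/@AfterClass map to Jupiter's @BeforeAll/@AfterAll; with the default per-method test lifecycle these methods must be static, which is why the existing static startMetastore/stopMetastore signatures carry over unchanged. A minimal sketch of that base-class pattern, with hypothetical names:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;

abstract class ExampleSharedResourceBaseTest {

  // stands in for the shared metastore/catalog state held by the real base class
  protected static Object sharedResource;

  @BeforeAll // replaces @BeforeClass; must be static under the default lifecycle
  static void startSharedResource() {
    sharedResource = new Object();
  }

  @AfterAll // replaces @AfterClass
  static void stopSharedResource() {
    sharedResource = null;
  }
}
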
@@ -34,8 +34,8 @@
import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.types.Types;
import org.junit.After;
import org.junit.Before;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;

public class HiveTableBaseTest extends HiveMetastoreTest {

@@ -56,13 +56,13 @@ public class HiveTableBaseTest extends HiveMetastoreTest {

private Path tableLocation;

@Before
@BeforeEach
public void createTestTable() {
this.tableLocation =
new Path(catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec).location());
}

@After
@AfterEach
public void dropTestTable() throws Exception {
// drop the table data
tableLocation.getFileSystem(hiveConf).delete(tableLocation, true);