[SPARK-3515][SQL] Moves test suite setup code to beforeAll rather than in constructor

Please refer to the JIRA ticket for details.

**NOTE** We should check all test suites that perform similar initialization side effects in their constructors. This PR only fixes `ParquetMetastoreSuite`, because that suite breaks our Jenkins Maven build.

Author: Cheng Lian <[email protected]>

Closes apache#2375 from liancheng/say-no-to-constructor and squashes the following commits:

0ceb75b [Cheng Lian] Moves test suite setup code to beforeAll rather than in constructor
liancheng authored and marmbrus committed Sep 13, 2014
1 parent 885d162 commit 6d887db
Showing 1 changed file with 24 additions and 29 deletions.
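
For context, here is a minimal sketch of the idiom the diff below adopts: one-time, side-effecting setup is performed in `beforeAll` and undone in `afterAll`, rather than in the suite's class body (its constructor). The suite name `TempDirSuite` is hypothetical, and the sketch assumes ScalaTest's `FunSuite` with the `BeforeAndAfterAll` trait; the real suite extends Spark's `QueryTest`.

```scala
import java.io.File

import org.scalatest.{BeforeAndAfterAll, FunSuite}

// Hypothetical suite sketching the pattern this commit adopts: setup lives in
// beforeAll/afterAll instead of the class body, so merely constructing the
// suite performs no work.
class TempDirSuite extends FunSuite with BeforeAndAfterAll {
  private var tempDir: File = _

  override def beforeAll(): Unit = {
    // Runs once, just before the first test in the suite executes.
    tempDir = File.createTempFile("example", "suite")
    tempDir.delete()
    tempDir.mkdir()
  }

  override def afterAll(): Unit = {
    // Runs once, after the last test in the suite finishes.
    tempDir.delete()
  }

  test("temp dir exists while tests run") {
    assert(tempDir.exists() && tempDir.isDirectory)
  }
}
```

With this structure, tools that instantiate suite classes during discovery (as the Jenkins Maven build appears to) trigger no Hive or filesystem work until the tests actually run.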
@@ -20,14 +20,10 @@ package org.apache.spark.sql.parquet
 
 import java.io.File
 
-import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.scalatest.BeforeAndAfterAll
 
-import scala.reflect.ClassTag
-
-import org.apache.spark.sql.{SQLConf, QueryTest}
-import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
-import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.QueryTest
+import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.apache.spark.sql.hive.test.TestHive._
 
 case class ParquetData(intField: Int, stringField: String)
@@ -36,27 +32,19 @@ case class ParquetData(intField: Int, stringField: String)
  * Tests for our SerDe -> Native parquet scan conversion.
  */
 class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
-
   override def beforeAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "true")
-  }
-
-  override def afterAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "false")
-  }
-
-  val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
-  partitionedTableDir.delete()
-  partitionedTableDir.mkdir()
-
-  (1 to 10).foreach { p =>
-    val partDir = new File(partitionedTableDir, s"p=$p")
-    sparkContext.makeRDD(1 to 10)
-      .map(i => ParquetData(i, s"part-$p"))
-      .saveAsParquetFile(partDir.getCanonicalPath)
-  }
-
-  sql(s"""
+    val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
+    partitionedTableDir.delete()
+    partitionedTableDir.mkdir()
+
+    (1 to 10).foreach { p =>
+      val partDir = new File(partitionedTableDir, s"p=$p")
+      sparkContext.makeRDD(1 to 10)
+        .map(i => ParquetData(i, s"part-$p"))
+        .saveAsParquetFile(partDir.getCanonicalPath)
+    }
+
+    sql(s"""
     create external table partitioned_parquet
     (
       intField INT,
@@ -70,7 +58,7 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${partitionedTableDir.getCanonicalPath}'
   """)
 
-  sql(s"""
+    sql(s"""
     create external table normal_parquet
     (
       intField INT,
@@ -83,8 +71,15 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${new File(partitionedTableDir, "p=1").getCanonicalPath}'
   """)
 
-  (1 to 10).foreach { p =>
-    sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    (1 to 10).foreach { p =>
+      sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    }
+
+    setConf("spark.sql.hive.convertMetastoreParquet", "true")
   }
 
+  override def afterAll(): Unit = {
+    setConf("spark.sql.hive.convertMetastoreParquet", "false")
+  }
+
   test("project the partitioning column") {
