[MINOR] Fix Java style errors and remove unused imports
## What changes were proposed in this pull request?

Fix assorted Java style errors and remove unused imports; these were found incidentally rather than through a systematic pass.

## How was this patch tested?

Tested on my local machine.

Author: Xin Ren <[email protected]>

Closes apache#14161 from keypointt/SPARK-16437.
keypointt authored and srowen committed Jul 13, 2016
1 parent f156136 · commit f73891e
Showing 4 changed files with 4 additions and 7 deletions.
HeapMemoryAllocator.java

```diff
@@ -24,7 +24,6 @@
 import java.util.Map;
 
 import org.apache.spark.unsafe.Platform;
-import org.apache.spark.unsafe.memory.MemoryAllocator;
 
 /**
  * A simple {@link MemoryAllocator} that can allocate up to 16GB using a JVM long primitive array.
```
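
The import can be dropped because this file lives in the same package as `MemoryAllocator`: same-package types are always in scope, so both the code and the `{@link MemoryAllocator}` Javadoc reference keep resolving without it. A minimal single-file sketch of that rule, with hypothetical names rather than Spark code:

```java
package demo;

// Allocator and SimpleAllocator both live in package "demo", so
// SimpleAllocator can reference Allocator without any import; an explicit
// "import demo.Allocator;" is legal but redundant, and style checkers
// flag it.
interface Allocator {
    byte[] allocate(int size);
}

/** A simple {@link Allocator} backed by plain byte arrays. */
class SimpleAllocator implements Allocator {
    @Override
    public byte[] allocate(int size) {
        return new byte[size];
    }
}
```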
MemoryAllocator.java

```diff
@@ -23,12 +23,12 @@ public interface MemoryAllocator {
   * Whether to fill newly allocated and deallocated memory with 0xa5 and 0x5a bytes respectively.
   * This helps catch misuse of uninitialized or freed memory, but imposes some overhead.
   */
-  public static final boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
+  boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
    System.getProperty("spark.memory.debugFill", "false"));
 
   // Same as jemalloc's debug fill values.
-  public static final byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
-  public static final byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;
+  byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
+  byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;
 
   /**
    * Allocates a contiguous block of memory. Note that the allocated memory is not guaranteed
```
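
The style fix here rests on a Java language rule: every field declared in an interface is implicitly `public static final` (and every abstract method implicitly `public`), so spelling those modifiers out is redundant. This is the kind of thing checkers such as Checkstyle's `RedundantModifier` rule complain about. A small self-contained illustration, with hypothetical names:

```java
// Fields in an interface are implicitly public static final, so the two
// declarations below are exactly equivalent to writing the modifiers out.
interface Limits {
    int MAX_RETRIES = 3;        // same as: public static final int MAX_RETRIES = 3;
    long TIMEOUT_MS = 30_000L;  // same as: public static final long TIMEOUT_MS = 30_000L;
}

class LimitsDemo {
    public static void main(String[] args) {
        // Accessed like any other public static final constant.
        System.out.println(Limits.MAX_RETRIES + " retries, " + Limits.TIMEOUT_MS + " ms");
        // Limits.MAX_RETRIES = 5;  // would not compile: the field is final
    }
}
```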
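
The comment in this hunk also describes the jemalloc-style debug-fill scheme these constants drive: poison new memory with 0xa5 and freed memory with 0x5a, so a stray read of either pattern points at an uninitialized-use or use-after-free bug. A minimal sketch of the idea using a plain `byte[]` rather than Spark's allocators (all names here are hypothetical):

```java
import java.util.Arrays;

// Debug-fill demo: mark allocated-but-uninitialized memory with 0xa5 and
// freed memory with 0x5a, matching jemalloc's conventions.
final class DebugFillDemo {
    static final byte CLEAN = (byte) 0xa5;
    static final byte FREED = (byte) 0x5a;

    static byte[] allocate(int size) {
        byte[] block = new byte[size];
        Arrays.fill(block, CLEAN);  // poison new memory
        return block;
    }

    static void free(byte[] block) {
        Arrays.fill(block, FREED);  // poison freed memory
    }

    public static void main(String[] args) {
        byte[] block = allocate(4);
        System.out.printf("after allocate: 0x%02x%n", block[0] & 0xff); // prints 0xa5
        free(block);
        System.out.printf("after free:     0x%02x%n", block[0] & 0xff); // prints 0x5a
    }
}
```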
ParquetFileFormat.scala

```diff
@@ -780,8 +780,7 @@ private[sql] object ParquetFileFormat extends Logging {
     val assumeBinaryIsString = sparkSession.sessionState.conf.isParquetBinaryAsString
     val assumeInt96IsTimestamp = sparkSession.sessionState.conf.isParquetINT96AsTimestamp
     val writeLegacyParquetFormat = sparkSession.sessionState.conf.writeLegacyParquetFormat
-    val serializedConf =
-      new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
+    val serializedConf = new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
 
     // !! HACK ALERT !!
     //
```
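
Aside on the context around this hunk: `SerializableConfiguration` is Spark's wrapper that makes a Hadoop `Configuration` (which is not `java.io.Serializable`) safe to capture in closures shipped to executors. A rough sketch of that general wrapper pattern in plain Java, with hypothetical names and a `Map` standing in for the Hadoop class, not Spark's actual implementation:

```java
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

// Wrapper pattern: the non-serializable payload is marked transient and is
// rebuilt from its entries during Java serialization, so the wrapper can
// travel across the network inside a closure.
final class SerializableSettings implements Serializable {
    private transient Map<String, String> settings;

    SerializableSettings(Map<String, String> settings) {
        this.settings = settings;
    }

    Map<String, String> value() {
        return settings;
    }

    // Write the entries instead of the (non-serializable) object...
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
        out.defaultWriteObject();
        out.writeObject(new HashMap<>(settings));
    }

    // ...and rebuild the payload on the receiving side.
    @SuppressWarnings("unchecked")
    private void readObject(java.io.ObjectInputStream in)
            throws java.io.IOException, ClassNotFoundException {
        in.defaultReadObject();
        settings = (Map<String, String>) in.readObject();
    }
}
```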
DataSourceTest.scala

```diff
@@ -18,7 +18,6 @@
 package org.apache.spark.sql.sources
 
 import org.apache.spark.sql._
-import org.apache.spark.sql.internal.SQLConf
 
 private[sql] abstract class DataSourceTest extends QueryTest {
 
```
