Rename OutputFormats to Output
jtnystrom committed Jan 16, 2023
1 parent 56969c5 commit 9a46e62
Showing 4 changed files with 8 additions and 8 deletions.
@@ -60,7 +60,7 @@ class CountedKmers(val counts: Dataset[(Array[Long], Abundance)], splitter: Broa
   * @param output Directory to write to (prefix name)
   */
  def writeFasta(output: String): Unit = {
-    OutputFormats.writeFastaCounts(withSequences, output)
+    Output.writeFastaCounts(withSequences, output)
  }

/**
@@ -71,9 +71,9 @@
   */
  def writeTSV(withKmers: Boolean, output: String): Unit = {
    if (withKmers) {
-      OutputFormats.writeTSV(withSequences, output)
+      Output.writeTSV(withSequences, output)
    } else {
-      OutputFormats.writeTSV(counts.map(_._2), output)
+      Output.writeTSV(counts.map(_._2), output)
    }
  }
}
6 changes: 3 additions & 3 deletions src/main/scala/com/jnpersson/discount/spark/Index.scala
@@ -212,7 +212,7 @@ class Index(val params: IndexParams, val buckets: Dataset[ReducibleBucket])
   * @param output Directory to write to (prefix name)
   */
  def writeHistogram(output: String): Unit =
-    OutputFormats.writeTSV(histogram, output)
+    Output.writeTSV(histogram, output)

  /** Write per-bucket statistics to HDFS.
   * This action triggers a computation.
@@ -222,15 +222,15 @@
    val bkts = stats()
    bkts.cache()
    bkts.write.mode(SaveMode.Overwrite).option("sep", "\t").csv(s"${location}_bucketStats")
-    OutputFormats.showStats(bkts, Some(location))
+    Output.showStats(bkts, Some(location))
    bkts.unpersist()
  }

  /** Show summary stats for this index.
   * This action triggers a computation.
   */
  def showStats(outputLocation: Option[String] = None): Unit = {
-    OutputFormats.showStats(stats(), outputLocation)
+    Output.showStats(stats(), outputLocation)
  }

  /** Write this index to a location.
@@ -26,7 +26,7 @@ import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}
/**
 * Output format helper methods
 */
-object OutputFormats {
+object Output {
  /**
   * Write a data table as TSV to the filesystem.
   * @param allKmers data to write
@@ -151,7 +151,7 @@ private[jnpersson] class DiscountConf(args: Array[String]) extends SparkToolConf
  requireOne(inputFiles, indexLocation)

  def run(implicit spark: SparkSession) : Unit =
-    OutputFormats.showStats(inputIndex().stats(min.toOption, max.toOption), output.toOption)
+    Output.showStats(inputIndex().stats(min.toOption, max.toOption), output.toOption)
  }
  addSubcommand(stats)

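The rename is purely mechanical: every call site swaps the OutputFormats prefix for Output, with no signature changes. A minimal migration sketch for a downstream caller, assuming the object lives in the com.jnpersson.discount.spark package (the package shown for Index.scala above); the dump helper, its Dataset element type, and the path are hypothetical, while writeTSV taking a Dataset plus an output path prefix follows the hunks in this diff:

  import org.apache.spark.sql.Dataset
  import com.jnpersson.discount.spark.Output // was OutputFormats before this commit

  object MigrationSketch {
    // Hypothetical caller: `counts` stands in for whatever Dataset the
    // callers above pass (e.g. withSequences, counts.map(_._2)).
    def dump(counts: Dataset[(String, Long)], path: String): Unit =
      Output.writeTSV(counts, path) // previously: OutputFormats.writeTSV(counts, path)
  }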
