From 836dc60a1ef5d64ba806e00695f49fe808c736cc Mon Sep 17 00:00:00 2001
From: Kevin Normoyle
Date: Wed, 8 May 2013 14:53:11 -0700
Subject: [PATCH 1/2] updat

---
 py/h2o_perf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/py/h2o_perf.py b/py/h2o_perf.py
index 745e58c5ed..c84af8d539 100644
--- a/py/h2o_perf.py
+++ b/py/h2o_perf.py
@@ -221,7 +221,7 @@ def log_window(w):
         found = False
         for k in histogram:
             ### print k
-            found |= log_window(30)
+            found |= log_window(60) ### log_window(30)
 
         if not found:
             print "iostats: desired window not found in histogram"

From 79402c00f926595bd9eaece404bbc19a480bff71 Mon Sep 17 00:00:00 2001
From: Kevin Normoyle
Date: Wed, 8 May 2013 14:53:40 -0700
Subject: [PATCH 2/2] updat

---
 .../test_parse_nflx_loop_s3n_hdfs.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/py/testdir_hosts/test_parse_nflx_loop_s3n_hdfs.py b/py/testdir_hosts/test_parse_nflx_loop_s3n_hdfs.py
index e637e06e69..9fa1a4609d 100644
--- a/py/testdir_hosts/test_parse_nflx_loop_s3n_hdfs.py
+++ b/py/testdir_hosts/test_parse_nflx_loop_s3n_hdfs.py
@@ -33,15 +33,15 @@ def test_parse_nflx_loop_s3n_hdfs(self):
 
 # ("syn_datasets/syn_7350063254201195578_10000x200.csv_000[23][0-9]", "syn_20.csv", 20 * avgSynSize, 700),
 # ("syn_datasets/syn_7350063254201195578_10000x200.csv_000[45678][0-9]", "syn_50.csv", 50 * avgSynSize, 700),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_A_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_B_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_C_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_D_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_E_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_F_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
-            ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_A.dat.gz", 300 * avgMichalSize, 3600),
-            ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_B.dat.gz", 300 * avgMichalSize, 3600),
-            ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_C.dat.gz", 300 * avgMichalSize, 3600),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_A_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_A.dat.gz", 300 * avgMichalSize, 3600),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_B_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_C_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_D_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_E_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("[A-D]-800-manyfiles-nflx-gz/file_[0-9]*.dat.gz", "file_F_800_x55.dat.gz", 800 * (avgMichalSize/2), 7200),
+# ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_B.dat.gz", 300 * avgMichalSize, 3600),
+# ("manyfiles-nflx-gz/file_[123][0-9][0-9].dat.gz", "file_300_C.dat.gz", 300 * avgMichalSize, 3600),
             ("manyfiles-nflx-gz/file_1.dat.gz", "file_1.dat.gz", 1 * avgMichalSize, 300),
             ("manyfiles-nflx-gz/file_[2][0-9].dat.gz", "file_10.dat.gz", 10 * avgMichalSize, 700),
             ("manyfiles-nflx-gz/file_[34][0-9].dat.gz", "file_20.dat.gz", 20 * avgMichalSize, 900),
@@ -76,12 +76,12 @@ def test_parse_nflx_loop_s3n_hdfs(self):
         # use i to forward reference in the list, so we can do multiple outstanding parses below
         for i, (csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
             ## for tryHeap in [54, 28]:
-            for tryHeap in [24]:
+            for tryHeap in [28]:
                 print "\n", tryHeap,"GB heap, 1 jvm per host, import", protocol, "then parse"
                 jea = "-XX:+UseParNewGC -XX:+UseConcMarkSweepGC"
                 h2o_hosts.build_cloud_with_hosts(node_count=1, java_heap_GB=tryHeap,
-                    java_extra_args=jea,
+                    # java_extra_args=jea,
                     enable_benchmark_log=True, timeoutSecs=120, retryDelaySecs=10,
                     # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
                     # this is for our amazon ec hdfs
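
Note on the one-line change in PATCH 1/2: it widens the iostats window passed
to log_window from 30 seconds to 60 seconds, keeping the old call as a
trailing comment. The hunk does not show log_window's body or how histogram
is built, so the following is only a minimal sketch of the window-scan
pattern under stated assumptions; the histogram shape (window length in
seconds -> accumulated iostats) and the log_window body here are hypothetical,
not the actual h2o_perf.py implementation:

    # Hypothetical sketch (Python 2, to match the print statements in the diff).
    # Assumption: histogram maps a window length in seconds to iostats totals.
    histogram = {30: {'reads': 120, 'writes': 45},
                 60: {'reads': 260, 'writes': 90}}

    def log_window(w):
        # Print the stats for window w if present; report whether it was found.
        if w in histogram:
            print "iostats window %ds: %s" % (w, histogram[w])
            return True
        return False

    # As in the hunk: the loop variable k is unused, log_window(60) is simply
    # retried once per histogram key, and found records whether any call hit.
    found = False
    for k in histogram:
        found |= log_window(60)  ### log_window(30)

    if not found:
        print "iostats: desired window not found in histogram"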