Update pre-commit
benjeffery authored and mergify[bot] committed Apr 1, 2022
1 parent c8568d5 commit 691a294
Showing 11 changed files with 73 additions and 68 deletions.
20 changes: 10 additions & 10 deletions .pre-commit-config.yaml
@@ -1,43 +1,43 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.2.0
+    rev: v4.1.0
     hooks:
       - id: check-merge-conflict
      - id: debug-statements
      - id: mixed-line-ending
      - id: check-case-conflict
      - id: check-yaml
   - repo: https://github.com/benjeffery/pre-commit-clang-format
-    rev: '1.0'
+    rev: c21a74d089aaeb86c2c19df371c7e7bf40c07207
     hooks:
       - id: clang-format
         exclude: avl
         verbose: true
   - repo: https://github.com/asottile/reorder_python_imports
-    rev: v2.3.5
+    rev: v3.0.1
     hooks:
       - id: reorder-python-imports
         args: [ --unclassifiable-application-module=_tsinfer ]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.7.2
+    rev: v2.31.1
     hooks:
       - id: pyupgrade
-        args: [ --py3-plus, --py36-plus ]
+        args: [ --py3-plus, --py37-plus ]
   - repo: https://github.com/psf/black
-    rev: 20.8b1
+    rev: 22.3.0
     hooks:
       - id: black
         language_version: python3
   - repo: https://github.com/asottile/blacken-docs
-    rev: v1.8.0
+    rev: v1.12.1
     hooks:
       - id: blacken-docs
         args: [--skip-errors]
-        additional_dependencies: [black==20.8b1]
+        additional_dependencies: [black==22.3.0]
         language_version: python3
   - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.8.3
+    rev: 3.9.2
     hooks:
       - id: flake8
         args: [--config=.flake8]
-        additional_dependencies: ["flake8-bugbear==20.1.4", "flake8-builtins==1.5.2"]
+        additional_dependencies: ["flake8-bugbear==22.3.23", "flake8-builtins==1.5.3"]
12 changes: 6 additions & 6 deletions dev.py
@@ -67,13 +67,13 @@ def tsinfer_dev(

     np.random.seed(seed)
     random.seed(seed)
-    L_megabases = int(L * 10 ** 6)
+    L_megabases = int(L * 10**6)

     # daiquiri.setup(level=log_level)

     source_ts = msprime.simulate(
         n,
-        Ne=10 ** 4,
+        Ne=10**4,
         length=L_megabases,
         recombination_rate=recombination_rate,
         mutation_rate=1e-8,
@@ -211,7 +211,7 @@ def dump_provenance(ts):


 def build_profile_inputs(n, num_megabases):
-    L = num_megabases * 10 ** 6
+    L = num_megabases * 10**6
     input_file = "tmp__NOBACKUP__/profile-n={}-m={}.input.trees".format(
         n, num_megabases
     )
@@ -221,7 +221,7 @@ def build_profile_inputs(n, num_megabases):
     ts = msprime.simulate(
         n,
         length=L,
-        Ne=10 ** 4,
+        Ne=10**4,
         recombination_rate=1e-8,
         mutation_rate=1e-8,
         random_seed=10,
@@ -281,10 +281,10 @@ def tutorial_samples():

     ts = msprime.simulate(
         sample_size=10000,
-        Ne=10 ** 4,
+        Ne=10**4,
         recombination_rate=1e-8,
         mutation_rate=1e-8,
-        length=10 * 10 ** 6,
+        length=10 * 10**6,
         random_seed=42,
     )
     ts.dump("tmp__NOBACKUP__/simulation-source.trees")
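Note: the whitespace dropped around ** throughout this commit matches the power-operator formatting introduced in Black 22.x, which hugs ** when both operands are simple (names, literals, attribute access). A minimal illustrative sketch, not code from this repository:

    # Output style of Black >= 22.x (illustrative only)
    a, b = 2, 3
    L = 10**6  # was: 10 ** 6 -- both operands are simple, so spaces are dropped
    seed = 2**30  # was: 2 ** 30
    y = (a + b) ** 2  # spaces kept: the left operand is a compound expression
    print(L, seed, y)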
4 changes: 2 additions & 2 deletions docs/simulation-example.py
@@ -10,10 +10,10 @@
 if True:
     ts = msprime.simulate(
         sample_size=10000,
-        Ne=10 ** 4,
+        Ne=10**4,
         recombination_rate=1e-8,
         mutation_rate=1e-8,
-        length=10 * 10 ** 6,
+        length=10 * 10**6,
         random_seed=42,
     )
     ts.dump("simulation-source.trees")
4 changes: 2 additions & 2 deletions docs/tutorial.rst
@@ -130,10 +130,10 @@ data under the coalescent with recombination, using `msprime
     ts = msprime.simulate(
         sample_size=10000,
-        Ne=10 ** 4,
+        Ne=10**4,
         recombination_rate=1e-8,
         mutation_rate=1e-8,
-        length=10 * 10 ** 6,
+        length=10 * 10**6,
         random_seed=42,
     )
     ts.dump("simulation-source.trees")
40 changes: 20 additions & 20 deletions evaluation.py
@@ -237,7 +237,7 @@ def edges_performance_worker(args):

 def run_edges_performance(args):
     num_lengths = 10
-    MB = 10 ** 6
+    MB = 10**6

     work = []
     rng = random.Random()
@@ -251,7 +251,7 @@ def run_edges_performance(args):
             "mutation_rate": args.mutation_rate,
             "Ne": args.Ne,
             "model": "smc_prime",
-            "random_seed": rng.randint(1, 2 ** 30),
+            "random_seed": rng.randint(1, 2**30),
         }
         work.append((sim_args, args.compute_tree_metrics, args.engine))

@@ -423,7 +423,7 @@ def unrank(samples, n):

 def edge_plot(ts, filename):
     n = ts.num_samples
-    pallete = sns.color_palette("husl", 2 ** n - 1)
+    pallete = sns.color_palette("husl", 2**n - 1)
     lines = []
     colours = []
     for tree in ts.trees():
@@ -445,7 +445,7 @@ def edge_plot(ts, filename):


 def run_hotspot_analysis(args):
-    MB = 10 ** 6
+    MB = 10**6
     L = args.length * MB

     rng = random.Random()
@@ -467,7 +467,7 @@ def run_hotspot_analysis(args):
         "recombination_map": recomb_map,
         "mutation_rate": args.mutation_rate,
         "Ne": args.Ne,
-        "random_seed": rng.randint(1, 2 ** 30),
+        "random_seed": rng.randint(1, 2**30),
     }
     ts = msprime.simulate(**sim_args)
     print("simulated ", ts.num_trees, "trees and", ts.num_sites, "sites")
@@ -562,7 +562,7 @@ def ancestor_properties_worker(args):

 def run_ancestor_properties(args):
     num_lengths = 10
-    MB = 10 ** 6
+    MB = 10**6

     work = []
     rng = random.Random()
@@ -577,7 +577,7 @@ def run_ancestor_properties(args):
             "mutation_rate": args.mutation_rate,
             "Ne": args.Ne,
             "model": "smc_prime",
-            "random_seed": rng.randint(1, 2 ** 30),
+            "random_seed": rng.randint(1, 2**30),
         }
         work.append((sim_args, not args.skip_exact))

@@ -702,7 +702,7 @@ def imputation_accuracy_worker(args):


 def run_imputation_accuracy(args):
-    MB = 10 ** 6
+    MB = 10**6

     work = []
     rng = random.Random()
@@ -716,7 +716,7 @@ def run_imputation_accuracy(args):
             "recombination_rate": args.recombination_rate,
             "mutation_rate": args.mutation_rate,
             "Ne": args.Ne,
-            "random_seed": rng.randint(1, 2 ** 30),
+            "random_seed": rng.randint(1, 2**30),
         }
         work.append((sim_args, missing_proportion))
         # imputation_accuracy_worker((sim_args, missing_proportion))
@@ -802,7 +802,7 @@ def sim_true_and_inferred_ancestors(args):
     Run a simulation under args and return the samples, plus the true and the inferred
     ancestors
     """
-    MB = 10 ** 6
+    MB = 10**6
     rng = random.Random(args.random_seed)
     np.random.seed(args.random_seed)
     sim_args = {
@@ -812,7 +812,7 @@ def sim_true_and_inferred_ancestors(args):
         "mutation_rate": args.mutation_rate,
         "Ne": args.Ne,
         "model": "smc_prime",
-        "random_seed": rng.randint(1, 2 ** 30),
+        "random_seed": rng.randint(1, 2**30),
     }
     ts = msprime.simulate(**sim_args)

@@ -1342,7 +1342,7 @@ def run_ancestor_quality(args):
                 print(" " * (olap_start_exact - offset1), end="")
                 print(" " * (olap_start_estim - offset2), end="")
             elif args.print_bad_ancestors == "inferred":
-                print("{:<5}".format(int(freq[focal_pos])), end="")
+                print(f"{int(freq[focal_pos]):<5}", end="")
                 k = 0
                 mask = estim_sites_mask[olap_start_estim:olap_end_estim]
                 for j, (bit, curr_pos) in enumerate(
@@ -1526,7 +1526,7 @@ def run_ancestor_quality(args):
             color="k",
             markeredgewidth=0.5,
             markerfacecolor="w",
-            markersize=1 ** 0.5,
+            markersize=1**0.5,
         ),
         mp.lines.Line2D(
             [],
@@ -1537,7 +1537,7 @@ def run_ancestor_quality(args):
             color="k",
             markeredgewidth=0.5,
             markerfacecolor="w",
-            markersize=10 ** 0.5,
+            markersize=10**0.5,
         ),
         mp.lines.Line2D(
             [],
@@ -1548,7 +1548,7 @@ def run_ancestor_quality(args):
             color="k",
             markeredgewidth=0.5,
             markerfacecolor="w",
-            markersize=100 ** 0.5,
+            markersize=100**0.5,
         ),
     ]
     name = "quality-by-freq-with-bias"
@@ -1704,7 +1704,7 @@ def get_node_degree_by_depth(ts):


 def run_node_degree(args):
-    MB = 10 ** 6
+    MB = 10**6
     rng = random.Random()
     if args.random_seed is not None:
         rng.seed(args.random_seed)
@@ -1715,7 +1715,7 @@ def run_node_degree(args):
         "mutation_rate": args.mutation_rate,
         "Ne": args.Ne,
         "model": "smc_prime",
-        "random_seed": rng.randint(1, 2 ** 30),
+        "random_seed": rng.randint(1, 2**30),
     }
     smc_ts = msprime.simulate(**sim_args)

@@ -1804,11 +1804,11 @@ def run_perfect_inference(args):
     rng = random.Random()
     rng.seed(args.random_seed)
     for _ in range(args.num_replicates):
-        seed = rng.randint(1, 2 ** 30)
+        seed = rng.randint(1, 2**30)
         base_ts = msprime.simulate(
             args.sample_size,
             Ne=args.Ne,
-            length=args.length * 10 ** 6,
+            length=args.length * 10**6,
             recombination_rate=1e-8,
             random_seed=seed,
             model=model,
@@ -1867,7 +1867,7 @@ def add_standard_arguments(
 ):
     parser.add_argument("--destination-dir", "-d", default="")
     parser.add_argument("--sample-size", "-n", type=int, default=sample_size)
-    parser.add_argument("--Ne", "-N", type=int, default=10 ** 4)
+    parser.add_argument("--Ne", "-N", type=int, default=10**4)
     parser.add_argument(
         "--length", "-l", type=float, default=length, help="Sequence length in MB"
     )
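The str.format() to f-string rewrite in run_ancestor_quality above matches the pattern pyupgrade produces for --py36-plus and later targets, although pyupgrade only rewrites simple arguments automatically, so this particular call may have been converted by hand. A small illustrative sketch, not taken from the repository:

    value = 42
    old = "{:<5}".format(value)  # before the rewrite
    new = f"{value:<5}"  # after: the format spec :<5 carries over unchanged
    assert old == new
    print(repr(new))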
24 changes: 16 additions & 8 deletions tests/test_formats.py
@@ -238,7 +238,7 @@ def test_too_small_max_file_size_init(self):

     def test_too_small_max_file_size_add(self):
         with tempfile.TemporaryDirectory(prefix="tsinf_format_test") as tempdir:
-            base_size = 2 ** 16  # Big enough to allow the initial file to be created
+            base_size = 2**16  # Big enough to allow the initial file to be created
             # Fail during adding a large amount of data
             with pytest.raises(lmdb.MapFullError):
                 filename = os.path.join(tempdir, "samples.tmp")
@@ -257,8 +257,8 @@ def test_acceptable_max_file_size(self):
         with tempfile.TemporaryDirectory(prefix="tsinf_format_test") as tempdir:
             # set a reasonably large number of sites and samples, and check we
             # don't bomb out
-            n_samples = 2 ** 10
-            n_sites = 2 ** 12
+            n_samples = 2**10
+            n_sites = 2**12
             np.random.seed(123)
             filename = os.path.join(tempdir, "samples.tmp")
             with formats.SampleData(
@@ -331,7 +331,7 @@ def test_from_tree_sequence_bad_times(self):
         tables = ts.dump_tables()
         # Associate nodes at different times with a single individual
         nodes_time = tables.nodes.time
-        min_time = min([n.time for n in ts.nodes() if not n.is_sample()])
+        min_time = min(n.time for n in ts.nodes() if not n.is_sample())
         nodes_time[ts.samples()] = np.linspace(0, min_time, n_individuals * ploidy)
         tables.nodes.time = nodes_time
         # Zap out the mutation times to avoid conflicts.
@@ -2324,7 +2324,9 @@ def test_ancestors_truncated_length(self):
         assert np.array_equal(
             trunc_lengths[time < upper_limit], original_lengths[time < upper_limit]
         )
-        for orig_anc, trunc_anc in zip(ancestors.ancestors(), trunc_anc.ancestors()):
+        for orig_anc, trunc_anc in zip(  # noqa: B020
+            ancestors.ancestors(), trunc_anc.ancestors()
+        ):
             assert orig_anc.time == trunc_anc.time
             assert np.array_equal(orig_anc.focal_sites, trunc_anc.focal_sites)
             if orig_anc.time >= upper_limit:
@@ -2348,7 +2350,9 @@ def test_truncate_extreme_interval(self):
         ancestors = tsinfer.generate_ancestors(sample_data)
         time = ancestors.ancestors_time[:]
         trunc_anc = ancestors.truncate_ancestors(np.min(time), np.max(time), 1)
-        for orig_anc, trunc_anc in zip(ancestors.ancestors(), trunc_anc.ancestors()):
+        for orig_anc, trunc_anc in zip(  # noqa: B020
+            ancestors.ancestors(), trunc_anc.ancestors()
+        ):
             assert orig_anc.start == trunc_anc.start
             assert orig_anc.end == trunc_anc.end
             assert orig_anc.time == trunc_anc.time
@@ -2359,7 +2363,9 @@ def test_truncate_extreme_interval(self):
         ancestors = tsinfer.generate_ancestors(sample_data)
         time = ancestors.ancestors_time[:]
         trunc_anc = ancestors.truncate_ancestors(0, 1, 1)
-        for orig_anc, trunc_anc in zip(ancestors.ancestors(), trunc_anc.ancestors()):
+        for orig_anc, trunc_anc in zip(  # noqa: B020
+            ancestors.ancestors(), trunc_anc.ancestors()
+        ):
             assert orig_anc.start == trunc_anc.start
             assert orig_anc.end == trunc_anc.end
             assert orig_anc.time == trunc_anc.time
@@ -2374,7 +2380,9 @@ def test_one_haplotype_truncated(self):
         oldest_site = np.max(sites_time)
         midpoint = np.median(sites_time)
         trunc_anc = ancestors.truncate_ancestors(midpoint, oldest_site, 1)
-        for orig_anc, trunc_anc in zip(ancestors.ancestors(), trunc_anc.ancestors()):
+        for orig_anc, trunc_anc in zip(  # noqa: B020
+            ancestors.ancestors(), trunc_anc.ancestors()
+        ):
             assert orig_anc.time == trunc_anc.time
             assert np.array_equal(orig_anc.focal_sites, trunc_anc.focal_sites)

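The new # noqa: B020 markers silence flake8-bugbear's B020 check, which flags a for-loop target that reuses a name appearing in the iterable expression; the check postdates the previously pinned flake8-bugbear 20.1.4 and is picked up with the bump to 22.3.23. An illustrative sketch of the pattern B020 warns about, not code from the test suite:

    pairs = [(1, 1), (2, 2)]
    for pairs in pairs:  # B020: the loop target rebinds the name being iterated
        print(pairs)
    # The zip() loops above reuse trunc_anc deliberately, hence the noqa comments.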
(Diffs for the remaining 5 changed files were not loaded and are not shown.)
