[Lint] Add flake8-bugbear (ray-project#19053)
* Add flake8-bugbear

* Add flake8-bugbear
jjyao authored Oct 4, 2021
1 parent 2b44e9a commit 7588bfd
Showing 20 changed files with 116 additions and 33 deletions.
16 changes: 16 additions & 0 deletions .flake8
@@ -24,4 +24,20 @@ ignore =
W605
I
N
+B001
+B002
+B003
+B004
+B005
+B007
+B008
+B009
+B010
+B011
+B012
+B013
+B014
+B015
+B016
+B017
avoid-escape = no
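
Of the bugbear checks B001-B017 added to the ignore list above, only B006 ("do not use mutable data structures for argument defaults") is left enabled, which is why every code change below rewrites a mutable default to a None sentinel. A minimal sketch of the bug class B006 catches (names are illustrative, not from the diff):

def append_item(item, bucket=[]):  # B006: the list is built once, at definition time
    bucket.append(item)
    return bucket

print(append_item(1))  # [1]
print(append_item(2))  # [1, 2] -- the "empty" default remembered state
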
2 changes: 1 addition & 1 deletion .gitpod/Dockerfile
@@ -15,7 +15,7 @@ RUN set -x; apt update \
&& mv bazel.gpg /etc/apt/trusted.gpg.d/ \
&& echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list \
&& apt update && apt install bazel-3.7.2 -y \
-    && pip3 install cython==0.29.0 pytest pandas tree tabulate pexpect sklearn joblib yapf==0.23.0 flake8==3.9.1 mypy==0.782 flake8-quotes setproctitle==1.1.10 psutil \
+    && pip3 install cython==0.29.0 pytest pandas tree tabulate pexpect sklearn joblib yapf==0.23.0 flake8==3.9.1 mypy==0.782 flake8-quotes flake8-bugbear==21.9.2 setproctitle==1.1.10 psutil \
&& python3 -c 'print("startup --output_base=/workspace/ray/.bazel-cache\nstartup --host_jvm_args=-Xmx1800m\nbuild --jobs=6")' > /etc/bazel.bazelrc

RUN update-alternatives --install /usr/local/bin/python python /usr/bin/python3 30 \
4 changes: 4 additions & 0 deletions ci/travis/format.sh
@@ -83,6 +83,10 @@ if [[ $(flake8 --version) != *"flake8_quotes"* ]]; then
echo "WARNING: Ray uses flake8 with flake8_quotes. Might error without it. Install with: pip install flake8-quotes"
fi

+if [[ $(flake8 --version) != *"flake8-bugbear"* ]]; then
+    echo "WARNING: Ray uses flake8 with flake8-bugbear. Might error without it. Install with: pip install flake8-bugbear"
+fi

SHELLCHECK_FLAGS=(
--exclude=1090 # "Can't follow non-constant source. Use a directive to specify location."
--exclude=1091 # "Not following {file} due to some error"
12 changes: 8 additions & 4 deletions python/ray/data/datasource/file_based_datasource.py
@@ -275,9 +275,10 @@ def _expand_paths(paths: Union[str, List[str]],
return expanded_paths, file_infos


-def _expand_directory(path: str,
-                      filesystem: "pyarrow.fs.FileSystem",
-                      exclude_prefixes: List[str] = [".", "_"]) -> List[str]:
+def _expand_directory(
+        path: str,
+        filesystem: "pyarrow.fs.FileSystem",
+        exclude_prefixes: Optional[List[str]] = None) -> List[str]:
"""
Expand the provided directory path to a list of file paths.
@@ -292,6 +293,9 @@ def _expand_directory(path: str,
Returns:
A list of file paths contained in the provided directory.
"""
+    if exclude_prefixes is None:
+        exclude_prefixes = [".", "_"]

from pyarrow.fs import FileSelector
selector = FileSelector(path, recursive=True)
files = filesystem.get_file_info(selector)
@@ -304,7 +308,7 @@
if not file_path.startswith(base_path):
continue
relative = file_path[len(base_path):]
-        if any(relative.startswith(prefix) for prefix in [".", "_"]):
+        if any(relative.startswith(prefix) for prefix in exclude_prefixes):
continue
filtered_paths.append((file_path, file_))
# We sort the paths to guarantee a stable order.
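
Besides dropping the mutable default, this hunk fixes a latent bug: the old loop filtered against a hard-coded [".", "_"] instead of the exclude_prefixes parameter. The None-sentinel idiom used here recurs through the rest of the commit; a self-contained sketch (filter_paths is an illustrative stand-in, not the Ray function):

from typing import List, Optional

def filter_paths(paths: List[str],
                 exclude_prefixes: Optional[List[str]] = None) -> List[str]:
    # Materializing the default inside the body gives every call its
    # own list; a [".", "_"] default in the signature would be one
    # object shared across all calls.
    if exclude_prefixes is None:
        exclude_prefixes = [".", "_"]
    return [p for p in paths
            if not any(p.startswith(pre) for pre in exclude_prefixes)]

print(filter_paths(["part-0.csv", ".crc", "_metadata"]))  # ['part-0.csv']
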
4 changes: 2 additions & 2 deletions python/ray/experimental/array/remote/core.py
@@ -68,8 +68,8 @@ def diag(v, k=0):


@ray.remote
-def transpose(a, axes=[]):
-    axes = None if axes == [] else axes
+def transpose(a, axes=None):
+    axes = None if (axes == [] or axes is None) else axes
return np.transpose(a, axes=axes)


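The empty list here was only ever a stand-in for "no explicit axes", so the rewrite keeps the old contract for callers that still pass []. A quick check of the equivalence (np.transpose reverses the axes when axes=None):

import numpy as np

a = np.arange(6).reshape(2, 3)
assert np.transpose(a, axes=None).shape == (3, 2)    # the new default path
assert np.transpose(a, axes=(1, 0)).shape == (3, 2)  # explicit axes, same result
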
7 changes: 5 additions & 2 deletions python/ray/serve/http_proxy.py
@@ -259,8 +259,11 @@ def __init__(self,
port: int,
controller_name: str,
controller_namespace: str,
-                 http_middlewares: List[
-                     "starlette.middleware.Middleware"] = []):  # noqa: F821
+                 http_middlewares: Optional[List[
+                     "starlette.middleware.Middleware"]] = None):  # noqa: F821
+        if http_middlewares is None:
+            http_middlewares = []

self.host = host
self.port = port

5 changes: 4 additions & 1 deletion python/ray/tests/test_autoscaler.py
@@ -464,7 +464,10 @@ def waitFor(self, condition, num_retries=50, fail_msg=None):
fail_msg = fail_msg or "Timed out waiting for {}".format(condition)
raise RayTestTimeoutException(fail_msg)

-    def waitForNodes(self, expected, comparison=None, tag_filters={}):
+    def waitForNodes(self, expected, comparison=None, tag_filters=None):
+        if tag_filters is None:
+            tag_filters = {}

MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
5 changes: 4 additions & 1 deletion python/ray/tests/test_resource_demand_scheduler.py
@@ -1322,7 +1322,10 @@ def tearDown(self):
shutil.rmtree(self.tmpdir)
ray.shutdown()

-    def waitForNodes(self, expected, comparison=None, tag_filters={}):
+    def waitForNodes(self, expected, comparison=None, tag_filters=None):
+        if tag_filters is None:
+            tag_filters = {}

MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
5 changes: 4 additions & 1 deletion python/ray/tune/tests/test_trial_scheduler_pbt.py
@@ -201,7 +201,10 @@ def MockTrainingFuncSync(config, checkpoint_dir=None):
def tearDown(self):
ray.shutdown()

-    def synchSetup(self, synch, param=[10, 20, 30]):
+    def synchSetup(self, synch, param=None):
+        if param is None:
+            param = [10, 20, 30]

scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="mean_accuracy",
5 changes: 4 additions & 1 deletion python/ray/util/dask/scheduler_utils.py
@@ -371,8 +371,11 @@ def fire_task():
return nested_get(result, state["cache"])


-def apply_sync(func, args=(), kwds={}, callback=None):
+def apply_sync(func, args=(), kwds=None, callback=None):
    """ A naive synchronous version of apply_async """
+    if kwds is None:
+        kwds = {}

res = func(*args, **kwds)
if callback is not None:
callback(res)
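
Note that only the dict needed rewriting here: args=() is an immutable tuple, safe as a default, and B006 does not flag it. A runnable sketch of the fixed helper (the return is an illustrative addition; the hunk above is truncated):

def apply_sync(func, args=(), kwds=None, callback=None):
    """A naive synchronous version of apply_async (sketch)."""
    if kwds is None:
        kwds = {}  # fresh dict per call
    res = func(*args, **kwds)
    if callback is not None:
        callback(res)
    return res

print(apply_sync(max, args=(3, 7)))  # 7
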
5 changes: 4 additions & 1 deletion python/ray/workflow/step_executor.py
@@ -401,7 +401,10 @@ def __reduce__(self):

def _record_step_status(step_id: "StepID",
status: "WorkflowStatus",
-                        outputs: List["ObjectRef"] = []) -> None:
+                        outputs: Optional[List["ObjectRef"]] = None) -> None:
+    if outputs is None:
+        outputs = []

workflow_id = workflow_context.get_current_workflow_id()
workflow_manager = get_management_actor()
ray.get(
1 change: 1 addition & 0 deletions python/requirements_linters.txt
@@ -1,5 +1,6 @@
flake8==3.9.1
flake8-comprehensions
flake8-quotes==2.0.0
+flake8-bugbear==21.9.2
mypy==0.782
yapf==0.23.0
12 changes: 9 additions & 3 deletions rllib/agents/ddpg/ddpg_tf_model.py
@@ -1,6 +1,6 @@
import numpy as np
import gym
-from typing import List
+from typing import List, Optional

from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils.framework import try_import_tf
@@ -29,9 +29,9 @@ def __init__(
model_config: ModelConfigDict,
name: str,
# Extra DDPGActionModel args:
-            actor_hiddens: List[int] = [256, 256],
+            actor_hiddens: Optional[List[int]] = None,
             actor_hidden_activation: str = "relu",
-            critic_hiddens: List[int] = [256, 256],
+            critic_hiddens: Optional[List[int]] = None,
critic_hidden_activation: str = "relu",
twin_q: bool = False,
add_layer_norm: bool = False):
@@ -48,6 +48,12 @@ def __init__(
should be defined in subclasses of DDPGActionModel.
"""

+        if actor_hiddens is None:
+            actor_hiddens = [256, 256]
+
+        if critic_hiddens is None:
+            critic_hiddens = [256, 256]

super(DDPGTFModel, self).__init__(obs_space, action_space, num_outputs,
model_config, name)

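
The import change at the top of this file is part of the same fix: once the default becomes None, the annotation has to widen from List[int] to Optional[List[int]] to stay accurate, hence the added Optional import. A condensed sketch of the signature pattern (resolve_hiddens is a hypothetical helper, not RLlib code):

from typing import List, Optional

def resolve_hiddens(hiddens: Optional[List[int]] = None) -> List[int]:
    # None is the sentinel; the real default lives in the body so each
    # caller gets its own fresh list.
    return [256, 256] if hiddens is None else hiddens

assert resolve_hiddens() == [256, 256]
assert resolve_hiddens([64]) == [64]
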
12 changes: 9 additions & 3 deletions rllib/agents/ddpg/ddpg_torch_model.py
@@ -1,6 +1,6 @@
import numpy as np
import gym
-from typing import List, Dict, Union
+from typing import List, Dict, Union, Optional

from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
@@ -31,9 +31,9 @@ def __init__(
model_config: ModelConfigDict,
name: str,
# Extra DDPGActionModel args:
-            actor_hiddens: List[int] = [256, 256],
+            actor_hiddens: Optional[List[int]] = None,
             actor_hidden_activation: str = "relu",
-            critic_hiddens: List[int] = [256, 256],
+            critic_hiddens: Optional[List[int]] = None,
critic_hidden_activation: str = "relu",
twin_q: bool = False,
add_layer_norm: bool = False):
@@ -51,6 +51,12 @@ def __init__(
only defines the layers for the output heads. Those layers for
forward() should be defined in subclasses of DDPGTorchModel.
"""
+        if actor_hiddens is None:
+            actor_hiddens = [256, 256]
+
+        if critic_hiddens is None:
+            critic_hiddens = [256, 256]

nn.Module.__init__(self)
super(DDPGTorchModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
5 changes: 4 additions & 1 deletion rllib/contrib/sumo/connector.py
@@ -162,7 +162,7 @@ def _stopping_condition(self, current_step_counter, until_end):
return True
return False

-    def step(self, until_end=False, agents=set()):
+    def step(self, until_end=False, agents=None):
"""
Runs a "learning" step and returns if the simulation has finished.
    This function is meant to be called by the RLlib environment.
@@ -176,6 +176,9 @@ def step(self, until_end=False, agents=None):
Return:
Bool. True iff the simulation is still ongoing.
"""
+        if agents is None:
+            agents = set()

# Execute SUMO steps until the learning needs to happen
current_step_counter = 0
logger.debug(
18 changes: 14 additions & 4 deletions rllib/evaluation/metrics.py
@@ -56,10 +56,15 @@ def get_learner_stats(grad_info: GradInfoDict) -> LearnerStatsDict:

@DeveloperAPI
def collect_metrics(local_worker: Optional["RolloutWorker"] = None,
-                    remote_workers: List[ActorHandle] = [],
-                    to_be_collected: List[ObjectRef] = [],
+                    remote_workers: Optional[List[ActorHandle]] = None,
+                    to_be_collected: Optional[List[ObjectRef]] = None,
timeout_seconds: int = 180) -> ResultDict:
"""Gathers episode metrics from RolloutWorker instances."""
+    if remote_workers is None:
+        remote_workers = []
+
+    if to_be_collected is None:
+        to_be_collected = []

episodes, to_be_collected = collect_episodes(
local_worker,
@@ -73,11 +78,16 @@ def collect_metrics(local_worker: Optional["RolloutWorker"] = None,
@DeveloperAPI
def collect_episodes(
local_worker: Optional["RolloutWorker"] = None,
-        remote_workers: List[ActorHandle] = [],
-        to_be_collected: List[ObjectRef] = [],
+        remote_workers: Optional[List[ActorHandle]] = None,
+        to_be_collected: Optional[List[ObjectRef]] = None,
timeout_seconds: int = 180
) -> Tuple[List[Union[RolloutMetrics, OffPolicyEstimate]], List[ObjectRef]]:
"""Gathers new episodes metrics tuples from the given evaluators."""
+    if remote_workers is None:
+        remote_workers = []
+
+    if to_be_collected is None:
+        to_be_collected = []

if remote_workers:
pending = [
11 changes: 8 additions & 3 deletions rllib/examples/env/coin_game_non_vectorized_env.py
@@ -13,7 +13,7 @@
from gym.utils import seeding
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.utils import override
-from typing import Dict
+from typing import Dict, Optional

from ray.rllib.examples.env.utils.interfaces import InfoAccumulationInterface

@@ -36,7 +36,9 @@ class CoinGame(InfoAccumulationInterface, MultiAgentEnv, gym.Env):
np.array([-1, 0]),
]

-    def __init__(self, config: Dict = {}):
+    def __init__(self, config: Optional[Dict] = None):
+        if config is None:
+            config = {}

self._validate_config(config)

@@ -325,7 +327,10 @@ def _init_info(self):
class AsymCoinGame(CoinGame):
NAME = "AsymCoinGame"

-    def __init__(self, config: dict = {}):
+    def __init__(self, config: Optional[dict] = None):
+        if config is None:
+            config = {}

if "asymmetric" in config:
assert config["asymmetric"]
else:
9 changes: 7 additions & 2 deletions rllib/examples/env/coin_game_vectorized_env.py
@@ -21,7 +21,9 @@ class VectorizedCoinGame(CoinGame):
Vectorized Coin Game environment.
"""

-    def __init__(self, config={}):
+    def __init__(self, config=None):
+        if config is None:
+            config = {}

super().__init__(config)

@@ -159,7 +161,10 @@ def _load_env(self, env_state):
class AsymVectorizedCoinGame(VectorizedCoinGame):
NAME = "AsymCoinGame"

-    def __init__(self, config={}):
+    def __init__(self, config=None):
+        if config is None:
+            config = {}

if "asymmetric" in config:
assert config["asymmetric"]
else:
6 changes: 4 additions & 2 deletions rllib/examples/env/matrix_sequential_social_dilemma.py
@@ -8,7 +8,7 @@
import logging
from abc import ABC
from collections import Iterable
-from typing import Dict
+from typing import Dict, Optional

import numpy as np
from gym.spaces import Discrete
@@ -39,7 +39,9 @@ class MatrixSequentialSocialDilemma(InfoAccumulationInterface, MultiAgentEnv,
episode.
"""

-    def __init__(self, config: Dict = {}):
+    def __init__(self, config: Optional[Dict] = None):
+        if config is None:
+            config = {}

assert "reward_randomness" not in config.keys()
assert self.PAYOUT_MATRIX is not None
5 changes: 4 additions & 1 deletion rllib/utils/tf_run_builder.py
@@ -59,7 +59,10 @@ def get(self, to_fetch):
_count = 0


-def run_timeline(sess, ops, debug_name, feed_dict={}, timeline_dir=None):
+def run_timeline(sess, ops, debug_name, feed_dict=None, timeline_dir=None):
+    if feed_dict is None:
+        feed_dict = {}

if timeline_dir:
from tensorflow.python.client import timeline

