Rename master to main everywhere (#162)
Summary:
Tested by running a CIFAR sweep

Pull Request resolved: #162

Reviewed By: pdollar

Differential Revision: D30412710

Pulled By: mannatsingh

fbshipit-source-id: 2eedd709a5fa7463f487c5e5f861f2fb519b0fd8
mannatsingh authored and facebook-github-bot committed Aug 19, 2021
1 parent f8cd962 commit 8c79a8e
Showing 7 changed files with 21 additions and 21 deletions.
2 changes: 1 addition & 1 deletion docs/CONTRIBUTING.md
@@ -5,7 +5,7 @@ possible.
## Pull Requests
We actively welcome your pull requests.

-1. Fork the repo and create your branch from `master`.
+1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
4 changes: 2 additions & 2 deletions pycls/core/checkpoint.py
@@ -57,8 +57,8 @@ def has_checkpoint():

def save_checkpoint(model, model_ema, optimizer, epoch, test_err, ema_err):
"""Saves a checkpoint and also the best weights so far in a best checkpoint."""
-# Save checkpoints only from the master process
-if not dist.is_master_proc():
+# Save checkpoints only from the main process
+if not dist.is_main_proc():
return
# Ensure that the checkpoint dir exists
pathmgr.mkdirs(get_checkpoint_dir())
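
For context on the guard above: only the main (rank 0) process writes checkpoints, so concurrent workers never race on the same file. A minimal standalone sketch of the same pattern, assuming a torch.distributed job; this is illustrative, not the pycls implementation:

import os

import torch
import torch.distributed as dist


def is_main_proc():
    # Single-process runs have no process group; treat them as the main process.
    return not dist.is_initialized() or dist.get_rank() == 0


def save_checkpoint_sketch(model, optimizer, epoch, checkpoint_dir):
    """Writes a checkpoint from the main process only; other ranks return immediately."""
    if not is_main_proc():
        return
    os.makedirs(checkpoint_dir, exist_ok=True)
    state = {
        "epoch": epoch,
        "model_state": model.state_dict(),
        "optimizer_state": optimizer.state_dict(),
    }
    torch.save(state, os.path.join(checkpoint_dir, "model_epoch_{:04d}.pt".format(epoch)))
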
24 changes: 12 additions & 12 deletions pycls/core/distributed.py
@@ -38,15 +38,15 @@ def __call__(self):
self.fun()


-def is_master_proc(local=False):
+def is_main_proc(local=False):
"""
-Determines if the current process is the master process.
+Determines if the current process is the main process.
-Master process is responsible for logging, writing and loading checkpoints. In
-the multi GPU setting, we assign the master role to the rank 0 process. When
-training using a single GPU, there is a single process which is considered master.
+Main process is responsible for logging, writing and loading checkpoints. In
+the multi GPU setting, we assign the main role to the rank 0 process. When
+training using a single GPU, there is a single process which is considered main.
-If local==True, then check if the current process is the master on the current node.
+If local==True, then check if the current process is the main on the current node.
"""
m = cfg.MAX_GPUS_PER_NODE if local else cfg.NUM_GPUS
return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() % m == 0
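
To make the rank arithmetic above concrete, here is a self-contained sketch with hypothetical values (two 8-GPU nodes, so NUM_GPUS=16 and MAX_GPUS_PER_NODE=8); it mirrors the check rather than calling pycls:

def is_main_proc_sketch(rank, num_gpus=16, max_gpus_per_node=8, local=False):
    # Rank 0 is the global main; every rank that is a multiple of
    # max_gpus_per_node is the local main of its node.
    m = max_gpus_per_node if local else num_gpus
    return num_gpus == 1 or rank % m == 0


assert is_main_proc_sketch(rank=0)              # global main (and local main of node 0)
assert not is_main_proc_sketch(rank=8)          # not the global main
assert is_main_proc_sketch(rank=8, local=True)  # local main of the second node
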
@@ -95,10 +95,10 @@ def setup_distributed(cfg_state):
torch.cuda.set_device(local_rank)


-def single_proc_run(local_rank, fun, master_port, cfg_state, world_size):
+def single_proc_run(local_rank, fun, main_port, cfg_state, world_size):
"""Executes fun() on a single GPU in a multi-GPU setup."""
os.environ["MASTER_ADDR"] = "localhost"
-os.environ["MASTER_PORT"] = str(master_port)
+os.environ["MASTER_PORT"] = str(main_port)
os.environ["RANK"] = str(local_rank)
os.environ["LOCAL_RANK"] = str(local_rank)
os.environ["WORLD_SIZE"] = str(world_size)
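
The environment variables set above are exactly what torch.distributed's env:// rendezvous reads. The actual initialization call is not shown in this hunk; a hedged sketch of how a process group is typically created from these variables:

import os

import torch


def init_process_group_from_env():
    # Assumes MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE are already set,
    # as in single_proc_run above.
    backend = "nccl" if torch.cuda.is_available() else "gloo"
    torch.distributed.init_process_group(
        backend=backend,
        init_method="env://",
        world_size=int(os.environ["WORLD_SIZE"]),
        rank=int(os.environ["RANK"]),
    )
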
@@ -129,15 +129,15 @@ def multi_proc_run(num_proc, fun):
slurm_constraint=launch.GPU_TYPE,
slurm_additional_parameters={"mail-user": launch.EMAIL, "mail-type": "END"},
)
-master_port = random.randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
-job = executor.submit(SubmititRunner(master_port, fun, cfg))
+main_port = random.randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
+job = executor.submit(SubmititRunner(main_port, fun, cfg))
print("Submitted job_id {} with out_dir: {}".format(job.job_id, cfg.OUT_DIR))
if not use_slurm:
job.wait()
elif num_proc > 1:
-master_port = random.randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
+main_port = random.randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
mp_runner = torch.multiprocessing.start_processes
-args = (fun, master_port, cfg, num_proc)
+args = (fun, main_port, cfg, num_proc)
# Note: using "fork" below, "spawn" causes time and error regressions. Using
# spawn changes the default multiprocessing context to spawn, which doesn't
# interact well with the dataloaders (likely due to the use of OpenCV).
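
For context on the launcher above: torch.multiprocessing.start_processes takes the worker function, an argument tuple, the number of processes, and a start method, and calls the function as fun(local_rank, *args) in each child. A minimal hedged sketch of that API (the worker, port, and process count here are illustrative, not the pycls call):

import torch.multiprocessing as mp


def worker(local_rank, main_port, world_size):
    # Each child receives its index as the first argument, followed by args.
    print("worker {} of {} using port {}".format(local_rank, world_size, main_port))


if __name__ == "__main__":
    # "fork" matches the note above; "spawn" is the library default.
    mp.start_processes(worker, args=(29500, 4), nprocs=4, start_method="fork")
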
4 changes: 2 additions & 2 deletions pycls/core/logging.py
@@ -43,8 +43,8 @@ def ignore(*_objects, _sep=" ", _end="\n", _file=sys.stdout, _flush=False):

def setup_logging():
"""Sets up the logging."""
-# Enable logging only for the master process
-if dist.is_master_proc():
+# Enable logging only for the main process
+if dist.is_main_proc():
# Clear the root logger to prevent any existing logging config
# (e.g. set by another module) from messing with our setup
logging.root.handlers = []
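
The logging setup uses the same main-process guard so that training output is printed once rather than once per rank. A small sketch of the idea (not the pycls configuration; the format string and the silencing mechanism are illustrative):

import logging


def setup_logging_sketch(is_main):
    # Clear handlers installed by other modules, then configure logging only on
    # the main process; other ranks are silenced to avoid duplicated output.
    logging.root.handlers = []
    if is_main:
        logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")
    else:
        logging.disable(logging.CRITICAL)
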
4 changes: 2 additions & 2 deletions pycls/core/trainer.py
@@ -33,7 +33,7 @@

def setup_env():
"""Sets up environment for training or testing."""
-if dist.is_master_proc():
+if dist.is_main_proc():
# Ensure that the output dir exists
pathmgr.mkdirs(cfg.OUT_DIR)
# Save the config
@@ -76,7 +76,7 @@ def setup_model():

def get_weights_file(weights_file):
"""Download weights file if stored as a URL."""
-download = dist.is_master_proc(local=True)
+download = dist.is_main_proc(local=True)
weights_file = cache_url(weights_file, cfg.DOWNLOAD_CACHE, download=download)
if cfg.NUM_GPUS > 1:
torch.distributed.barrier()
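
The local=True check above means one process per node performs the download (each node has its own cache directory), while the remaining ranks block at the barrier until the file is in place. A hedged sketch of that pattern; the cache function here is a stand-in for pycls's cache_url:

import torch.distributed as dist


def get_weights_file_sketch(url, cache_fn, is_local_main):
    # cache_fn returns the node-local cached path; only the local main process
    # actually downloads, everyone else just resolves the path.
    weights_path = cache_fn(url, download=is_local_main)
    if dist.is_initialized() and dist.get_world_size() > 1:
        # Wait for the download to finish before any rank tries to read the file.
        dist.barrier()
    return weights_path
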
2 changes: 1 addition & 1 deletion pycls/models/model_zoo.py
@@ -19,7 +19,7 @@
_URL_WEIGHTS = "https://dl.fbaipublicfiles.com/pycls"

# URL prefix for model config files
-_URL_CONFIGS = "https://raw.githubusercontent.com/facebookresearch/pycls/master/configs"
+_URL_CONFIGS = "https://raw.githubusercontent.com/facebookresearch/pycls/main/configs"

# Model weights download cache directory
_DOWNLOAD_CACHE = "/tmp/pycls-download-cache"
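
With this change, the config files referenced by the model zoo are fetched from the main branch. A tiny illustration of how such a URL is composed (the config filename below is a placeholder, not necessarily a real file in the repo):

_URL_CONFIGS = "https://raw.githubusercontent.com/facebookresearch/pycls/main/configs"

# Placeholder config path, for illustration only.
config_url = "{}/{}".format(_URL_CONFIGS, "some_model_config.yaml")
# -> https://raw.githubusercontent.com/facebookresearch/pycls/main/configs/some_model_config.yaml
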
2 changes: 1 addition & 1 deletion tools/sweep_launch_job.py
@@ -67,7 +67,7 @@ def main():
# Print info about run
job_id = os.environ["SLURM_ARRAY_JOB_ID"]
task_id = os.environ["SLURM_ARRAY_TASK_ID"]
-prt("Job array master job ID: {}".format(job_id))
+prt("Job array main job ID: {}".format(job_id))
prt("Job array task ID (index): {}".format(task_id))
prt("Running job on: {}".format(str(os.uname())))
# Load what we need
