Smangrul/accelerate mp integrate (huggingface#23148)
* mixed precision support via accelerate

* fix issues

* fix for the sharded ddp case

* fix flax and tf failing tests

* refactor the place to create `Accelerator` object

* address comments by removing debugging print statements
pacman100 authored May 31, 2023
1 parent de9255d commit 9f0646a
Showing 2 changed files with 51 additions and 26 deletions.
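
Before the file-by-file diff, here is a minimal sketch of the Accelerate mixed-precision pattern this commit moves the Trainer onto: build an Accelerator, prepare() the training objects, and route the backward pass and gradient clipping through it. The sketch is illustrative only; the toy model, optimizer, and data are invented placeholders, and the real Trainer (below) creates a bare Accelerator() that reads its dtype from the environment.

# Minimal sketch of Accelerate-managed mixed precision (illustrative, not part of this commit).
# The model, optimizer, and dataset below are placeholder assumptions.
import torch
from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="bf16")  # "fp16" also works, but needs a GPU

model = torch.nn.Linear(128, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 128), torch.randint(0, 2, (64,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)

# prepare() wraps the objects for the current setup: device placement,
# autocast for the chosen dtype, and grad scaling when fp16 is active.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, labels in dataloader:
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)
    accelerator.backward(loss)  # replaces loss.backward()
    accelerator.clip_grad_norm_(model.parameters(), 1.0)
    optimizer.step()
    optimizer.zero_grad()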
68 changes: 42 additions & 26 deletions src/transformers/trainer.py
@@ -212,6 +212,8 @@
     if version.parse(accelerate_version) >= version.parse("0.16"):
         from accelerate import skip_first_batches
 
+    from accelerate import Accelerator
+
 
 if TYPE_CHECKING:
     import optuna
@@ -337,6 +339,9 @@ def __init__(
         self.deepspeed = None
         self.is_in_train = False
 
+        # create accelerator object
+        self.accelerator = Accelerator()
+
         # memory metrics - must set up as early as possible
         self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
         self._memory_tracker.start()
@@ -607,7 +612,7 @@ def __init__(
                     "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
                 )
 
-        if args.fp16 or args.bf16:
+        if (args.fp16 or args.bf16) and self.sharded_ddp is not None:
             if args.half_precision_backend == "auto":
                 if args.device == torch.device("cpu"):
                     if args.fp16:
@@ -624,30 +629,31 @@ def __init__(
         self.do_grad_scaling = False
         if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled()):
             # deepspeed and SageMaker Model Parallel manage their own half precision
-            if args.half_precision_backend == "cuda_amp":
-                self.use_cuda_amp = True
-                self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
-                # bf16 does not need grad scaling
-                self.do_grad_scaling = self.amp_dtype == torch.float16
-                if self.do_grad_scaling:
-                    if self.sharded_ddp is not None:
-                        self.scaler = ShardedGradScaler()
-                    elif self.fsdp is not None:
-                        from torch.distributed.fsdp.sharded_grad_scaler import (
-                            ShardedGradScaler as FSDPShardedGradScaler,
-                        )
-
-                        self.scaler = FSDPShardedGradScaler()
-                    elif is_torch_tpu_available():
-                        from torch_xla.amp import GradScaler
-
-                        self.scaler = GradScaler()
-                    else:
-                        self.scaler = torch.cuda.amp.GradScaler()
-            elif args.half_precision_backend == "cpu_amp":
-                self.use_cpu_amp = True
-                self.amp_dtype = torch.bfloat16
+            if self.sharded_ddp is not None:
+                if args.half_precision_backend == "cuda_amp":
+                    self.use_cuda_amp = True
+                    self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
+                    # bf16 does not need grad scaling
+                    self.do_grad_scaling = self.amp_dtype == torch.float16
+                    if self.do_grad_scaling:
+                        if self.sharded_ddp is not None:
+                            self.scaler = ShardedGradScaler()
+                        elif self.fsdp is not None:
+                            from torch.distributed.fsdp.sharded_grad_scaler import (
+                                ShardedGradScaler as FSDPShardedGradScaler,
+                            )
+
+                            self.scaler = FSDPShardedGradScaler()
+                        elif is_torch_tpu_available():
+                            from torch_xla.amp import GradScaler
+
+                            self.scaler = GradScaler()
+                        else:
+                            self.scaler = torch.cuda.amp.GradScaler()
+                elif args.half_precision_backend == "cpu_amp":
+                    self.use_cpu_amp = True
+                    self.amp_dtype = torch.bfloat16
             elif args.half_precision_backend == "apex":
                 if not is_apex_available():
                     raise ImportError(
                         "Using FP16 with APEX but APEX is not installed, please refer to"
@@ -1801,6 +1807,11 @@ def _inner_training_loop(
         if delay_optimizer_creation:
             self.create_optimizer_and_scheduler(num_training_steps=max_steps)
 
+        # prepare using `accelerator` prepare
+        model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
+            self.model, self.optimizer, self.lr_scheduler
+        )
+
         # Check if saved optimizer or scheduler states exist
         self._load_optimizer_and_scheduler(resume_from_checkpoint)
 
@@ -2013,10 +2024,15 @@ def _inner_training_loop(
                         elif hasattr(model, "clip_grad_norm_"):
                             # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                             model.clip_grad_norm_(args.max_grad_norm)
-                        else:
+                        elif self.use_apex:
                             # Revert to normal clipping otherwise, handling Apex or full precision
                             nn.utils.clip_grad_norm_(
-                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
+                                amp.master_params(self.optimizer),
                                 args.max_grad_norm,
                             )
+                        else:
+                            self.accelerator.clip_grad_norm_(
+                                model.parameters(),
+                                args.max_grad_norm,
+                            )
 
@@ -2802,7 +2818,7 @@ def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor,
             # loss gets scaled under gradient_accumulation_steps in deepspeed
             loss = self.deepspeed.backward(loss)
         else:
-            loss.backward()
+            self.accelerator.backward(loss)
 
         return loss.detach()
 
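Taken together, the trainer.py changes delegate mixed precision to Accelerate instead of Trainer-managed GradScaler objects: __init__ builds a bare Accelerator(), _inner_training_loop runs the model, optimizer, and scheduler through accelerator.prepare(), gradient clipping falls back to accelerator.clip_grad_norm_(), and training_step calls accelerator.backward(loss). The bare Accelerator() gets its dtype from the ACCELERATE_MIXED_PRECISION environment variable, which the training_args.py hunk below sets. A rough illustration of that handshake (assumption-level sketch, not code from the diff):

import os

# What --bf16 would export (see the training_args.py change below).
os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16"

from accelerate import Accelerator

accelerator = Accelerator()              # no explicit mixed_precision argument
print(accelerator.mixed_precision)       # expected to report "bf16"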
9 changes: 9 additions & 0 deletions src/transformers/training_args.py
@@ -1562,6 +1562,15 @@ def __post_init__(self):
                 FutureWarning,
             )
 
+        # if training args is specified, it will override the one specified in the accelerate config
+        if self.half_precision_backend != "apex" and len(self.sharded_ddp) == 0:
+            mixed_precision_dtype = os.environ.get("ACCELERATE_MIXED_PRECISION", "no")
+            if self.fp16:
+                mixed_precision_dtype = "fp16"
+            elif self.bf16:
+                mixed_precision_dtype = "bf16"
+            os.environ["ACCELERATE_MIXED_PRECISION"] = mixed_precision_dtype
+
     def __str__(self):
         self_as_dict = asdict(self)
 
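The training_args.py hunk is the other half of that handshake: unless Apex or sharded DDP is in use, an explicit --fp16/--bf16 flag overrides whatever mixed precision the accelerate config already exported, and the result is written back to ACCELERATE_MIXED_PRECISION. A standalone rendering of that precedence rule, with an invented helper name (resolve_mixed_precision is not a real transformers or accelerate function):

import os


def resolve_mixed_precision(fp16: bool, bf16: bool) -> str:
    # Start from whatever the accelerate config exported, defaulting to "no".
    mixed_precision_dtype = os.environ.get("ACCELERATE_MIXED_PRECISION", "no")
    # Explicit TrainingArguments flags win.
    if fp16:
        mixed_precision_dtype = "fp16"
    elif bf16:
        mixed_precision_dtype = "bf16"
    os.environ["ACCELERATE_MIXED_PRECISION"] = mixed_precision_dtype
    return mixed_precision_dtype


assert resolve_mixed_precision(fp16=False, bf16=True) == "bf16"
assert resolve_mixed_precision(fp16=True, bf16=False) == "fp16"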
