Commit

Temp fix CPU float16 inference bug

fsx950223 committed Jun 22, 2020
1 parent d1f1f07 commit 6b003e8
Showing 1 changed file with 11 additions and 1 deletion.
efficientdet/utils.py: 11 additions & 1 deletion
@@ -578,7 +578,17 @@ def verify_feats_size(feats,
 def get_precision(strategy: str, mixed_precision: bool = False):
   """Get the precision policy for a given strategy."""
   if mixed_precision:
-    return 'mixed_bfloat16' if strategy == 'tpu' else 'mixed_float16'
+    if strategy == 'tpu':
+      return 'mixed_bfloat16'
+    else:
+      if len(tf.config.experimental.list_physical_devices('GPU')) > 0:
+        return 'mixed_float16'
+      else:
+        # TODO(fsx950223): Fix CPU float16 inference
+        # (https://github.com/google/automl/issues/504)
+        logging.warning("There are some bugs in CPU float16 kernel,"
+                        " use float32 instead")
+        return 'float32'
 
   return 'float32'
 

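For context, a minimal sketch of how the policy string returned by get_precision might be applied as a global Keras policy, assuming the experimental mixed-precision API of the TensorFlow 2.1/2.2 era that this repository targeted; the strategy='gpus' argument and the bare `import utils` are illustrative assumptions, not part of this commit:

    import tensorflow as tf

    import utils  # the module patched above; import path is illustrative

    # With this commit, a CPU-only host now gets 'float32' back even when
    # mixed_precision=True, sidestepping the buggy CPU float16 kernels;
    # a host with a visible GPU still gets 'mixed_float16'.
    policy_name = utils.get_precision(strategy='gpus', mixed_precision=True)

    # Apply the policy globally (experimental mixed-precision API of that era).
    policy = tf.keras.mixed_precision.experimental.Policy(policy_name)
    tf.keras.mixed_precision.experimental.set_policy(policy)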
