Commit

set_num_threads
Co-authored-by: Denis Kuznedelev <[email protected]>
justheuristic and Godofnothing committed Jan 12, 2024
1 parent b260d7a commit 8e8b3ea
Showing 2 changed files with 2 additions and 2 deletions.
main.py: 1 addition & 1 deletion
@@ -690,7 +690,7 @@ def update_outs_parallel(
help="Skip model quantization and immediately evaluate the loaded model",
)

-torch.set_num_threads(16)
+torch.set_num_threads(min(16, torch.get_num_threads()))
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.matmul.allow_tf32 = False

notebooks/aq_simple.ipynb: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
"from src.aq import QuantizedWeight\n",
"\n",
"\n",
"torch.set_num_threads(16)\n",
"torch.set_num_threads(min(16, saved_num_threads))\n",
"torch.backends.cudnn.allow_tf32 = False\n",
"torch.backends.cuda.matmul.allow_tf32 = False\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
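Both hunks apply the same guard: instead of forcing 16 intra-op threads unconditionally, the new call caps the count at 16 while never requesting more than PyTorch already reports via torch.get_num_threads(). A minimal standalone sketch of that behavior, not part of the commit itself:

```python
import torch

# Current intra-op thread count (typically derived from the host's core
# count, OMP_NUM_THREADS, or a prior set_num_threads call).
available = torch.get_num_threads()

# Cap at 16 threads, but never exceed what is already available.
torch.set_num_threads(min(16, available))

print(f"threads available: {available}, threads in use: {torch.get_num_threads()}")
```

On hosts limited to fewer than 16 threads (for example via OMP_NUM_THREADS), the guarded form keeps the lower limit in place rather than oversubscribing the CPU.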
