forked from pytorch/FBGEMM
Commit 82bc8f4
more CK FP8 rowwise GEMM instances and tuning (pytorch#3455)

Summary:
Pull Request resolved: pytorch#3455
X-link: facebookresearch/FBGEMM#539

Added some MFMA 16x16 instances that seem to help with power efficiency and use them in emu1.7.

Reviewed By: jwfromm
Differential Revision: D66776945
fbshipit-source-id: e8c6bf6b626b7528c49c1c0ec0578d4681eb2941
1 parent: 0f8f5e6
Showing 4 changed files with 168 additions and 8 deletions.
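Two of the four changed files are the new kernel instances reproduced below; the remaining changes (not shown in this excerpt) tune the shape-based kernel selection to use them. As a rough sketch only — the function name, the threshold, and the routing are invented for illustration, not taken from the commit — a heuristic of that kind maps a problem shape to one kernel instance:

// Illustrative sketch, not from this commit: shape-based selection of a
// rowwise FP8 kernel instance. Assumes both instances below are declared
// (e.g. via their kernel headers). The threshold is an invented placeholder.
using RowwiseKernel = at::Tensor (*)(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y);

RowwiseKernel pick_fp8_rowwise_kernel(int M, int N, int K) {
  if (N <= 4096) {
    // Narrower N tile: keeps more blocks resident for mid-sized shapes.
    return fp8_rowwise_256x192x128x128_16x16_6x4_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3;
  }
  // Wider N tile for large-N problems.
  return fp8_rowwise_256x192x256x128_16x16_6x8_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3;
}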
...s/fp8_rowwise_256x192x128x128_16x16_6x4_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3.hip
(new file, 72 additions, 0 deletions)
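Both new files follow the same naming convention: each underscore-separated segment of the file name mirrors, one-for-one and in order, a DeviceGemmHelper template argument in the code below. On the usual Composable Kernel reading (an inference from the arguments, not spelled out in the diff), that is: 256 threads per block; a 192x128x128 M/N/K block tile; the 16x16 MFMA instruction shape; a 6x4 MFMA-tile repeat per wave in M and N; the A, B, and C transfer cluster shapes; the C-shuffle factors; and the Intrawave scheduler with pipeline v3.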
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "fp8_rowwise_common.h"

at::Tensor
fp8_rowwise_256x192x128x128_16x16_6x4_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y) {
  // A kernel that seems to work well on mid sized tensors.

  // Check if this input needs to be padded.
  int M = size_to_dim_(XQ.dim() - 1, XQ.sizes());
  int N = WQ.size(0);
  int K = WQ.size(1);
  bool pad = (K % 128 != 0);

  // Dispatch based on whether padding is needed or not.
  if (pad) {
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        192,
        128,
        128,
        16,
        16,
        6,
        4,
        S<8, 32, 1>,
        S<8, 32, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        2,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3,
        ck::tensor_operation::device::GemmSpecialization::KPadding>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  } else {
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        192,
        128,
        128,
        16,
        16,
        6,
        4,
        S<8, 32, 1>,
        S<8, 32, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        2,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3,
        ck::tensor_operation::device::GemmSpecialization::Default>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  }
}
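For orientation, the instance can also be driven directly once the inputs are quantized. Below is a minimal hypothetical caller, not part of this commit: the wrapper name is invented, and it assumes a 2-D [M, K] XQ and [N, K] WQ. It shows that the bf16 output buffer is allocated by the caller and passed in as Y:

#include <ATen/ATen.h>
#include "fp8_rowwise_common.h"

// Hypothetical caller (not in this commit). Assumes XQ is a 2-D [M, K]
// FP8 tensor, WQ is [N, K], and x_scale / w_scale hold the rowwise
// dequantization scales.
at::Tensor run_fp8_rowwise_example(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale) {
  const int M = XQ.size(0);
  const int N = WQ.size(0);
  // The instance writes into a caller-provided bf16 output of shape [M, N].
  at::Tensor Y = at::empty({M, N}, XQ.options().dtype(at::kBFloat16));
  return fp8_rowwise_256x192x128x128_16x16_6x4_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3(
      XQ, WQ, x_scale, w_scale, Y);
}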
...s/fp8_rowwise_256x192x256x128_16x16_6x8_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3.hip
(new file, 72 additions, 0 deletions)
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "fp8_rowwise_common.h"

at::Tensor
fp8_rowwise_256x192x256x128_16x16_6x8_8x32x1_8x32x1_1x32x1x8_8x8x1_2x2_intrawave_v3(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y) {
  // A kernel that seems to work well on mid sized tensors.

  // Check if this input needs to be padded.
  int M = size_to_dim_(XQ.dim() - 1, XQ.sizes());
  int N = WQ.size(0);
  int K = WQ.size(1);
  bool pad = (K % 128 != 0);

  // Dispatch based on whether padding is needed or not.
  if (pad) {
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        192,
        256,
        128,
        16,
        16,
        6,
        8,
        S<8, 32, 1>,
        S<8, 32, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        2,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3,
        ck::tensor_operation::device::GemmSpecialization::KPadding>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  } else {
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        192,
        256,
        128,
        16,
        16,
        6,
        8,
        S<8, 32, 1>,
        S<8, 32, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        2,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3,
        ck::tensor_operation::device::GemmSpecialization::Default>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  }
}
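Comparing the two instances: they differ only in the N extent of the per-block tile (128 vs. 256) and in the fourth wave parameter (4 vs. 8), so the second does twice the work per block along N. Read alongside the commit message, the likely intent is that the narrower tile serves mid-sized shapes (as its own comment says) while the wider one covers larger N; both keep the MFMA 16x16 instruction shape that the summary credits with the power-efficiency win. This reading is inferred from the template arguments, not stated in the diff.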