gpu: intel: ocl: rename ref_ to simple_ for softmax
Author: h-sadia
Committed: Jul 23, 2024
Parent: 343e297
Commit: 2d13111
Showing 4 changed files with 17 additions and 17 deletions.
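
The commit is a mechanical rename of the OpenCL reference softmax implementation from ref_* to simple_*: classes, kernel names, header guards, and the implementation name string all change together, while the public API is untouched. As a minimal sketch, assuming the oneDNN v3.x C++ API and an available GPU engine, creating a softmax primitive descriptor looks the same before and after this commit; the rename is only visible in the implementation name string the descriptor reports (now "ocl:simple:any" instead of "ref:any") when this implementation is the one selected.

    // Minimal sketch: assumes the oneDNN v3.x C++ API and an available GPU
    // engine. The ref_ -> simple_ rename is internal; only the string
    // returned by impl_info_str() changes when this implementation wins.
    #include <iostream>
    #include "oneapi/dnnl/dnnl.hpp"

    int main() {
        using namespace dnnl;
        engine eng(engine::kind::gpu, 0);

        memory::desc md({16, 128}, memory::data_type::f32, memory::format_tag::ab);

        // Softmax over axis 1; the call itself is unaffected by the rename.
        auto pd = softmax_forward::primitive_desc(eng,
                prop_kind::forward_inference, algorithm::softmax_accurate,
                md, md, /*axis=*/1);

        // Prints e.g. "ocl:simple:any" if the renamed implementation is picked
        // (a faster implementation such as gen9_softmax may be chosen instead).
        std::cout << pd.impl_info_str() << "\n";
        return 0;
    }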
6 changes: 3 additions & 3 deletions src/gpu/gpu_softmax_list.cpp
@@ -18,8 +18,8 @@

#if DNNL_GPU_VENDOR == DNNL_VENDOR_INTEL
#include "gpu/intel/ocl/gen9_softmax.hpp"
#include "gpu/intel/ocl/ref_softmax.hpp"
#include "gpu/intel/ocl/reusable_softmax.hpp"
#include "gpu/intel/ocl/simple_softmax.hpp"
#endif

#if DNNL_GPU_VENDOR == DNNL_VENDOR_NVIDIA
@@ -43,7 +43,7 @@ const std::map<pk_impl_key_t, std::vector<impl_list_item_t>>
impl_list_map REG_SOFTMAX_P({
{{forward}, {
GPU_INSTANCE_INTEL(intel::ocl::gen9_softmax_fwd_t)
-GPU_INSTANCE_INTEL(intel::ocl::ref_softmax_fwd_t)
+GPU_INSTANCE_INTEL(intel::ocl::simple_softmax_fwd_t)
GPU_INSTANCE_INTEL(intel::ocl::reusable_softmax_fwd_t)
GPU_INSTANCE_NVIDIA(nvidia::cudnn_softmax_fwd_t)
GPU_INSTANCE_AMD(amd::miopen_softmax_fwd_t)
@@ -52,7 +52,7 @@ const std::map<pk_impl_key_t, std::vector<impl_list_item_t>>
}},
{{backward}, REG_BWD_PK({
GPU_INSTANCE_INTEL(intel::ocl::gen9_softmax_bwd_t)
-GPU_INSTANCE_INTEL(intel::ocl::ref_softmax_bwd_t)
+GPU_INSTANCE_INTEL(intel::ocl::simple_softmax_bwd_t)
GPU_INSTANCE_NVIDIA(nvidia::cudnn_softmax_bwd_t)
GPU_INSTANCE_AMD(amd::miopen_softmax_bwd_t)
GPU_INSTANCE_GENERIC_SYCL(generic::sycl::ref_sycl_softmax_bwd_t)
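
For context, the entries in impl_list_map above are tried in the order listed when a softmax primitive descriptor is created, and the first implementation that successfully initializes is used: gen9_softmax is preferred, with the renamed simple_softmax acting as the generic fallback ahead of reusable_softmax. The sketch below illustrates that first-fit dispatch pattern with hypothetical types; it is not oneDNN's actual registration machinery.

    // Illustrative first-fit dispatch over an ordered implementation list.
    // All names here are hypothetical; this only mirrors the idea behind
    // impl_list_map above.
    #include <functional>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct impl_t {
        std::string name;
    };

    // Each entry tries to set itself up for the given problem and returns
    // nullopt when unsupported (the analogue of a failing pd_t::init()).
    using factory_t = std::function<std::optional<impl_t>(int softmax_axis_size)>;

    std::optional<impl_t> create_softmax(
            const std::vector<factory_t> &impl_list, int axis_size) {
        for (const auto &try_create : impl_list)
            if (auto impl = try_create(axis_size)) return impl; // first fit wins
        return std::nullopt; // no implementation accepted the problem
    }

    int main() {
        std::vector<factory_t> impl_list = {
                // Optimized impl with a made-up restriction for illustration:
                // only handles axis sizes that are multiples of 16.
                [](int n) -> std::optional<impl_t> {
                    if (n % 16 == 0) return impl_t {"gen9_like"};
                    return std::nullopt;
                },
                // Generic fallback that accepts anything.
                [](int) -> std::optional<impl_t> { return impl_t {"simple_like"}; },
        };

        std::cout << create_softmax(impl_list, 128)->name << "\n"; // gen9_like
        std::cout << create_softmax(impl_list, 100)->name << "\n"; // simple_like
    }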
@@ -23,7 +23,7 @@ __attribute__((intel_reqd_sub_group_size(SUB_GROUP_SIZE)))
#endif

__kernel void
-ref_softmax_fwd_generic(__global SRC_DATA_T *src, __global DATA_T *dst,
+simple_softmax_fwd_generic(__global SRC_DATA_T *src, __global DATA_T *dst,
__global float *src_scale, __global float *dst_scale POST_OP_ARGS) {

const int dim[] = {
@@ -202,8 +202,8 @@ __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
__attribute__((intel_reqd_sub_group_size(SUB_GROUP_SIZE)))

__kernel void
-ref_softmax_bwd_generic(__global DST_DATA_T *dst, __global SRC_DATA_T *diff_src,
-__global DST_DATA_T *diff_dst) {
+simple_softmax_bwd_generic(__global DST_DATA_T *dst,
+__global SRC_DATA_T *diff_src, __global DST_DATA_T *diff_dst) {

const int dim[] = {
(get_global_id(0) / GROUP_SIZE) % BLOCK_0,
@@ -14,15 +14,15 @@
* limitations under the License.
*******************************************************************************/

#include "gpu/intel/ocl/ref_softmax.hpp"
#include "gpu/intel/ocl/simple_softmax.hpp"

namespace dnnl {
namespace impl {
namespace gpu {
namespace intel {
namespace ocl {

-status_t ref_softmax_fwd_t::execute_generic(const exec_ctx_t &ctx) const {
+status_t simple_softmax_fwd_t::execute_generic(const exec_ctx_t &ctx) const {
if (pd()->has_zero_dim_memory()) return status::success;

auto &src = CTX_IN_STORAGE(DNNL_ARG_SRC);
@@ -46,7 +46,7 @@ status_t ref_softmax_fwd_t::execute_generic(const exec_ctx_t &ctx) const {
}
}

-status_t ref_softmax_bwd_t::execute_generic(const exec_ctx_t &ctx) const {
+status_t simple_softmax_bwd_t::execute_generic(const exec_ctx_t &ctx) const {
if (pd()->has_zero_dim_memory()) return status::success;

auto &dst = CTX_IN_STORAGE(DNNL_ARG_DST);
@@ -14,8 +14,8 @@
* limitations under the License.
*******************************************************************************/

-#ifndef GPU_INTEL_OCL_REF_SOFTMAX_HPP
-#define GPU_INTEL_OCL_REF_SOFTMAX_HPP
+#ifndef GPU_INTEL_OCL_SIMPLE_SOFTMAX_HPP
+#define GPU_INTEL_OCL_SIMPLE_SOFTMAX_HPP

#include "common/c_types_map.hpp"
#include "common/primitive.hpp"
@@ -30,12 +30,12 @@ namespace gpu {
namespace intel {
namespace ocl {

-struct ref_softmax_fwd_t : public gpu_primitive_t {
+struct simple_softmax_fwd_t : public gpu_primitive_t {
using gpu_primitive_t::gpu_primitive_t;
struct pd_t : public gpu_softmax_fwd_pd_t {
using gpu_softmax_fwd_pd_t::gpu_softmax_fwd_pd_t;

DECLARE_COMMON_PD_T("ref:any", ref_softmax_fwd_t);
DECLARE_COMMON_PD_T("ocl:simple:any", simple_softmax_fwd_t);

bool post_ops_ok() const {
return attr()->post_ops_.has_default_values(
@@ -179,7 +179,7 @@ struct ref_softmax_fwd_t : public gpu_primitive_t {
kernel_ctx.define_int(utils::format("BLOCK_%d", i), pd()->block[i]);

CHECK(create_kernel(
engine, &kernel_, "ref_softmax_fwd_generic", kernel_ctx));
engine, &kernel_, "simple_softmax_fwd_generic", kernel_ctx));
if (!kernel_) return status::runtime_error;

return status::success;
@@ -195,12 +195,12 @@ struct ref_softmax_fwd_t : public gpu_primitive_t {
compute::kernel_t kernel_;
};

-struct ref_softmax_bwd_t : public gpu_primitive_t {
+struct simple_softmax_bwd_t : public gpu_primitive_t {
using gpu_primitive_t::gpu_primitive_t;
struct pd_t : public gpu_softmax_bwd_pd_t {
using gpu_softmax_bwd_pd_t::gpu_softmax_bwd_pd_t;

DECLARE_COMMON_PD_T("ref:any", ref_softmax_bwd_t);
DECLARE_COMMON_PD_T("ocl:simple:any", simple_softmax_bwd_t);

status_t init(impl::engine_t *engine) {
auto *compute_engine
@@ -302,7 +302,7 @@ struct ref_softmax_bwd_t : public gpu_primitive_t {
kernel_ctx.define_int(utils::format("BLOCK_%d", i), pd()->block[i]);

CHECK(create_kernel(
engine, &kernel_, "ref_softmax_bwd_generic", kernel_ctx));
engine, &kernel_, "simple_softmax_bwd_generic", kernel_ctx));
if (!kernel_) return status::runtime_error;

return status::success;
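
One reason the rename spans both the host code and the kernel source: create_kernel fetches the compiled kernel by its name string, so the "simple_softmax_*_generic" strings above must match the __kernel function names in the renamed OpenCL file exactly. A generic OpenCL sketch of that by-name lookup (plain OpenCL C API with a hypothetical kernel, not oneDNN's create_kernel wrapper):

    // Generic OpenCL sketch showing that kernels are fetched by the name of
    // their __kernel function; a stale name (e.g. the old ref_* spelling)
    // would fail with CL_INVALID_KERNEL_NAME. Hypothetical kernel, not part
    // of oneDNN.
    #define CL_TARGET_OPENCL_VERSION 200
    #include <CL/cl.h>
    #include <cstdio>

    static const char *kSource = R"CLC(
    __kernel void simple_copy(__global const float *src, __global float *dst) {
        dst[get_global_id(0)] = src[get_global_id(0)];
    }
    )CLC";

    int main() {
        cl_platform_id platform;
        cl_device_id device;
        if (clGetPlatformIDs(1, &platform, nullptr) != CL_SUCCESS) return 1;
        if (clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, nullptr)
                != CL_SUCCESS)
            return 1;

        cl_int err = CL_SUCCESS;
        cl_context ctx
                = clCreateContext(nullptr, 1, &device, nullptr, nullptr, &err);
        cl_program prog
                = clCreateProgramWithSource(ctx, 1, &kSource, nullptr, &err);
        clBuildProgram(prog, 1, &device, "", nullptr, nullptr);

        // The lookup key is the kernel function name in the source above.
        cl_kernel kernel = clCreateKernel(prog, "simple_copy", &err);
        std::printf("%s\n", err == CL_SUCCESS ? "kernel found by name"
                                              : "kernel name lookup failed");

        if (kernel) clReleaseKernel(kernel);
        clReleaseProgram(prog);
        clReleaseContext(ctx);
        return err == CL_SUCCESS ? 0 : 1;
    }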
