diff --git a/gen/gen.ml b/gen/gen.ml index ae9b0394..69e39321 100644 --- a/gen/gen.ml +++ b/gen/gen.ml @@ -135,6 +135,7 @@ module Func = struct | TensorOptions (* Tensor kind and device *) | Scalar | ScalarType + | ScalarTypeOption | Device | String | Layout @@ -169,7 +170,7 @@ module Func = struct | "const at::itensorlistref &" | "at::tensorlist" -> Some TensorList | "at::device" -> Some Device | "const at::scalar &" | "at::scalar" -> Some Scalar - | "at::scalartype" -> Some ScalarType + | "at::scalartype" -> if is_nullable then Some ScalarTypeOption else Some ScalarType | "c10::string_view" -> Some String | "at::layout" -> Some (if is_nullable then LayoutOption else Layout) | _ -> None @@ -195,6 +196,7 @@ module Func = struct | Tensor -> "tensor" | TensorOption -> "tensor" | ScalarType -> "int" + | ScalarTypeOption -> "int" | Device -> "int" | Scalar -> "scalar" | Layout | LayoutOption -> "int8_t" @@ -256,6 +258,7 @@ module Func = struct arg_name arg_name | ScalarType -> Printf.sprintf "at::ScalarType(%s)" arg_name + | ScalarTypeOption -> Printf.sprintf "%s < 0 ? 
c10::nullopt : c10::optional<at::ScalarType>(at::ScalarType(%s))" arg_name arg_name | Device -> Printf.sprintf "device_of_int(%s)" arg_name | _ -> arg_name) |> String.concat ~sep:", " @@ -310,7 +313,7 @@ module Func = struct | Tensor -> single_param "*mut C_tensor" | TensorOption -> single_param "*mut C_tensor" | Scalar -> single_param "*mut C_scalar" - | ScalarType -> single_param "c_int" + | ScalarType | ScalarTypeOption -> single_param "c_int" | Device -> single_param "c_int" | String -> Printf.sprintf "%s_ptr: *const u8, %s_len: c_int" an an | IntList | IntListOption -> @@ -397,6 +400,7 @@ module Func = struct | DoubleOption -> "impl Into<Option<f64>>" | Scalar -> "S" | ScalarType -> "Kind" + | ScalarTypeOption -> "impl Into<Option<Kind>>" | Device -> "Device" in Printf.sprintf "%s: %s" (rust_name arg.arg_name) rust_arg_type) @@ -450,6 +454,7 @@ module Func = struct | Scalar -> Printf.sprintf "%s.into().c_scalar" name | Bool -> Printf.sprintf "if %s { 1 } else { 0 }" name | ScalarType -> Printf.sprintf "%s.c_int()" name + | ScalarTypeOption -> Printf.sprintf "%s.into().map_or(-1, |s| s.c_int())" name | Device -> Printf.sprintf "%s.c_int()" name | TensorOptions -> Printf.sprintf "%s.0.c_int(), %s.1.c_int()" name name | Int64Option -> Printf.sprintf "%s.unwrap_or(0i64), %s.is_none() as i8" name name diff --git a/src/wrappers/tensor_fallible_generated.rs b/src/wrappers/tensor_fallible_generated.rs index 24601eb6..668599df 100644 --- a/src/wrappers/tensor_fallible_generated.rs +++ b/src/wrappers/tensor_fallible_generated.rs @@ -616,7 +616,7 @@ impl Tensor { a: &Tensor, size: impl Into<Option<&[i64]>>, stride: impl Into<Option<&[i64]>>, - dtype: Kind, + dtype: impl Into<Option<Kind>>, ) -> Result<(), TchError> { let size = size.into(); let stride = stride.into(); @@ -626,7 +626,7 @@ impl Tensor { size.as_ref().map_or(-1, |t| t.len() as i32), stride.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), stride.as_ref().map_or(-1, |t| t.len() as i32), - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(()) } @@ -5230,7 +5230,7 @@ impl 
Tensor { sobolstate: &Tensor, dimension: i64, num_generated: i64, - dtype: Kind, + dtype: impl Into>, ) -> Result<(Tensor, Tensor), TchError> { let mut c_tensors = [std::ptr::null_mut(); 2]; unsafe_torch_err!(atg__sobol_engine_draw( @@ -5240,7 +5240,7 @@ impl Tensor { sobolstate.c_tensor, dimension, num_generated, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] })) } @@ -5615,7 +5615,7 @@ impl Tensor { &self, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_csr_prod( @@ -5624,7 +5624,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -5634,7 +5634,7 @@ impl Tensor { out: &Tensor, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_csr_prod_dim_dtype_out( @@ -5644,7 +5644,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -5653,7 +5653,7 @@ impl Tensor { &self, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_csr_sum( @@ -5662,7 +5662,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -5672,7 +5672,7 @@ impl Tensor { out: &Tensor, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_csr_sum_dim_dtype_out( @@ -5682,7 +5682,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim 
{ 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -5762,14 +5762,14 @@ impl Tensor { pub fn f_internal_sparse_log_softmax_int( &self, dim: i64, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_log_softmax_int( c_tensors.as_mut_ptr(), self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -5880,13 +5880,17 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_internal_sparse_softmax_int(&self, dim: i64, dtype: Kind) -> Result { + pub fn f_internal_sparse_softmax_int( + &self, + dim: i64, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__sparse_softmax_int( c_tensors.as_mut_ptr(), self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -6426,19 +6430,27 @@ impl Tensor { Ok(r__) } - pub fn f_internal_to_dense(&self, dtype: Kind) -> Result { + pub fn f_internal_to_dense(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg__to_dense(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg__to_dense( + c_tensors.as_mut_ptr(), + self.c_tensor, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_internal_to_dense_out(&self, out: &Tensor, dtype: Kind) -> Result { + pub fn f_internal_to_dense_out( + &self, + out: &Tensor, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg__to_dense_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -13302,15 +13314,29 @@ impl Tensor { Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] 
})) } - pub fn f_cumprod(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> { + pub fn f_cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_cumprod(c_tensors.as_mut_ptr(), self.c_tensor, dim, dtype.c_int())); + unsafe_torch_err!(atg_cumprod( + c_tensors.as_mut_ptr(), + self.c_tensor, + dim, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_cumprod_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> { + pub fn f_cumprod_( + &mut self, + dim: i64, + dtype: impl Into<Option<Kind>>, + ) -> Result<Tensor, TchError> { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_cumprod_(c_tensors.as_mut_ptr(), self.c_tensor, dim, dtype.c_int())); + unsafe_torch_err!(atg_cumprod_( + c_tensors.as_mut_ptr(), + self.c_tensor, + dim, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -13331,38 +13357,62 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_cumprod_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Result<Tensor, TchError> { + pub fn f_cumprod_out( + &self, + out: &Tensor, + dim: i64, + dtype: impl Into<Option<Kind>>, + ) -> Result<Tensor, TchError> { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_cumprod_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_cumsum(&self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> { + pub fn f_cumsum(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_cumsum(c_tensors.as_mut_ptr(), self.c_tensor, dim, dtype.c_int())); + unsafe_torch_err!(atg_cumsum( + c_tensors.as_mut_ptr(), + self.c_tensor, + dim, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_cumsum_(&mut self, dim: i64, dtype: Kind) -> Result<Tensor, TchError> { + pub fn f_cumsum_( + &mut self, + dim: i64, + dtype: impl Into<Option<Kind>>, + ) -> Result<Tensor, TchError> { let mut 
c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_cumsum_(c_tensors.as_mut_ptr(), self.c_tensor, dim, dtype.c_int())); + unsafe_torch_err!(atg_cumsum_( + c_tensors.as_mut_ptr(), + self.c_tensor, + dim, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_cumsum_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Result { + pub fn f_cumsum_out( + &self, + out: &Tensor, + dim: i64, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_cumsum_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -20552,7 +20602,7 @@ impl Tensor { ord: S, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -20563,7 +20613,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -20573,7 +20623,7 @@ impl Tensor { ord: &str, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -20585,7 +20635,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -20596,7 +20646,7 @@ impl Tensor { ord: &str, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -20609,7 +20659,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), 
if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -20620,7 +20670,7 @@ impl Tensor { ord: S, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -20632,7 +20682,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -21301,13 +21351,17 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_log_softmax(&self, dim: i64, dtype: Kind) -> Result { + pub fn f_log_softmax( + &self, + dim: i64, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_log_softmax( c_tensors.as_mut_ptr(), self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -21316,7 +21370,7 @@ impl Tensor { &self, out: &Tensor, dim: i64, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_log_softmax_int_out( @@ -21324,7 +21378,7 @@ impl Tensor { out.c_tensor, self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -22660,9 +22714,13 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_mean(&self, dtype: Kind) -> Result { + pub fn f_mean(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_mean(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg_mean( + c_tensors.as_mut_ptr(), + self.c_tensor, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -22670,7 +22728,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, 
+ dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -22680,7 +22738,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -22690,7 +22748,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -22701,7 +22759,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -24690,7 +24748,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -24700,7 +24758,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -24710,7 +24768,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -24721,7 +24779,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -24864,7 +24922,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ 
-24874,7 +24932,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -24884,7 +24942,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -24895,7 +24953,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -25225,7 +25283,7 @@ impl Tensor { p: S, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype( @@ -25235,7 +25293,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -25246,7 +25304,7 @@ impl Tensor { p: S, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype_out( @@ -25257,7 +25315,7 @@ impl Tensor { dim.as_ptr(), dim.len() as i32, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -26481,20 +26539,29 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_prod(&self, dtype: Kind) -> Result { + pub fn f_prod(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_prod(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg_prod( + c_tensors.as_mut_ptr(), + self.c_tensor, + 
dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_prod_dim_int(&self, dim: i64, keepdim: bool, dtype: Kind) -> Result { + pub fn f_prod_dim_int( + &self, + dim: i64, + keepdim: bool, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_prod_dim_int( c_tensors.as_mut_ptr(), self.c_tensor, dim, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -26504,7 +26571,7 @@ impl Tensor { out: &Tensor, dim: i64, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_prod_int_out( @@ -26513,18 +26580,22 @@ impl Tensor { self.c_tensor, dim, if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_prod_out(&self, out: &Tensor, dtype: Kind) -> Result { + pub fn f_prod_out( + &self, + out: &Tensor, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_prod_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -30367,9 +30438,14 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_softmax(&self, dim: i64, dtype: Kind) -> Result { + pub fn f_softmax(&self, dim: i64, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_softmax(c_tensors.as_mut_ptr(), self.c_tensor, dim, dtype.c_int())); + unsafe_torch_err!(atg_softmax( + c_tensors.as_mut_ptr(), + self.c_tensor, + dim, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -30377,7 +30453,7 @@ impl Tensor { &self, out: &Tensor, dim: i64, - dtype: Kind, + dtype: impl Into>, ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; 
unsafe_torch_err!(atg_softmax_int_out( @@ -30385,7 +30461,7 @@ impl Tensor { out.c_tensor, self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -31976,13 +32052,17 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_special_log_softmax(&self, dim: i64, dtype: Kind) -> Result { + pub fn f_special_log_softmax( + &self, + dim: i64, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_special_log_softmax( c_tensors.as_mut_ptr(), self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -32601,13 +32681,17 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_special_softmax(&self, dim: i64, dtype: Kind) -> Result { + pub fn f_special_softmax( + &self, + dim: i64, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_special_softmax( c_tensors.as_mut_ptr(), self.c_tensor, dim, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -33513,9 +33597,13 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_sum(&self, dtype: Kind) -> Result { + pub fn f_sum(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_sum(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg_sum( + c_tensors.as_mut_ptr(), + self.c_tensor, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -33523,7 +33611,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -33533,7 +33621,7 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - 
dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -33543,7 +33631,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Result { let dim = dim.into(); let mut c_tensors = [std::ptr::null_mut(); 1]; @@ -33554,18 +33642,22 @@ impl Tensor { dim.as_ref().map_or(std::ptr::null_mut(), |t| t.as_ptr()), dim.as_ref().map_or(-1, |t| t.len() as i32), if keepdim { 1 } else { 0 }, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_sum_out(&self, out: &Tensor, dtype: Kind) -> Result { + pub fn f_sum_out( + &self, + out: &Tensor, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_sum_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -33984,9 +34076,13 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_to_dense(&self, dtype: Kind) -> Result { + pub fn f_to_dense(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_to_dense(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg_to_dense( + c_tensors.as_mut_ptr(), + self.c_tensor, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -34054,9 +34150,13 @@ impl Tensor { Ok(Tensor { c_tensor: c_tensors[0] }) } - pub fn f_to_mkldnn(&self, dtype: Kind) -> Result { + pub fn f_to_mkldnn(&self, dtype: impl Into>) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; - unsafe_torch_err!(atg_to_mkldnn(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int())); + unsafe_torch_err!(atg_to_mkldnn( + c_tensors.as_mut_ptr(), + self.c_tensor, + dtype.into().map_or(-1, |s| s.c_int()) + )); Ok(Tensor { c_tensor: c_tensors[0] }) } @@ -34070,13 +34170,17 @@ impl Tensor { Ok(Tensor { c_tensor: 
c_tensors[0] }) } - pub fn f_to_mkldnn_out(&self, out: &Tensor, dtype: Kind) -> Result { + pub fn f_to_mkldnn_out( + &self, + out: &Tensor, + dtype: impl Into>, + ) -> Result { let mut c_tensors = [std::ptr::null_mut(); 1]; unsafe_torch_err!(atg_to_mkldnn_out( c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, - dtype.c_int() + dtype.into().map_or(-1, |s| s.c_int()) )); Ok(Tensor { c_tensor: c_tensors[0] }) } diff --git a/src/wrappers/tensor_generated.rs b/src/wrappers/tensor_generated.rs index 5a046d7c..0975a1f8 100644 --- a/src/wrappers/tensor_generated.rs +++ b/src/wrappers/tensor_generated.rs @@ -270,7 +270,7 @@ impl Tensor { a: &Tensor, size: impl Into>, stride: impl Into>, - dtype: Kind, + dtype: impl Into>, ) { Tensor::f_internal_assert_tensor_metadata(a, size, stride, dtype).unwrap() } @@ -3145,7 +3145,7 @@ impl Tensor { sobolstate: &Tensor, dimension: i64, num_generated: i64, - dtype: Kind, + dtype: impl Into>, ) -> (Tensor, Tensor) { Tensor::f_internal_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype) .unwrap() @@ -3348,7 +3348,12 @@ impl Tensor { .unwrap() } - pub fn internal_sparse_csr_prod(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor { + pub fn internal_sparse_csr_prod( + &self, + dim: &[i64], + keepdim: bool, + dtype: impl Into>, + ) -> Tensor { self.f_internal_sparse_csr_prod(dim, keepdim, dtype).unwrap() } @@ -3357,12 +3362,17 @@ impl Tensor { out: &Tensor, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_internal_sparse_csr_prod_dim_dtype_out(out, dim, keepdim, dtype).unwrap() } - pub fn internal_sparse_csr_sum(&self, dim: &[i64], keepdim: bool, dtype: Kind) -> Tensor { + pub fn internal_sparse_csr_sum( + &self, + dim: &[i64], + keepdim: bool, + dtype: impl Into>, + ) -> Tensor { self.f_internal_sparse_csr_sum(dim, keepdim, dtype).unwrap() } @@ -3371,7 +3381,7 @@ impl Tensor { out: &Tensor, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { 
self.f_internal_sparse_csr_sum_dim_dtype_out(out, dim, keepdim, dtype).unwrap() } @@ -3416,7 +3426,11 @@ impl Tensor { self.f_internal_sparse_log_softmax_backward_data_out(out, grad_output, output, dim).unwrap() } - pub fn internal_sparse_log_softmax_int(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn internal_sparse_log_softmax_int( + &self, + dim: i64, + dtype: impl Into<Option<Kind>>, + ) -> Tensor { self.f_internal_sparse_log_softmax_int(dim, dtype).unwrap() } @@ -3464,7 +3478,7 @@ impl Tensor { self.f_internal_sparse_softmax_backward_data_out(out, grad_output, output, dim).unwrap() } - pub fn internal_sparse_softmax_int(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn internal_sparse_softmax_int(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_internal_sparse_softmax_int(dim, dtype).unwrap() } @@ -3670,11 +3684,11 @@ impl Tensor { Tensor::f_internal_to_cpu(tensors).unwrap() } - pub fn internal_to_dense(&self, dtype: Kind) -> Tensor { + pub fn internal_to_dense(&self, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_internal_to_dense(dtype).unwrap() } - pub fn internal_to_dense_out(&self, out: &Tensor, dtype: Kind) -> Tensor { + pub fn internal_to_dense_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_internal_to_dense_out(out, dtype).unwrap() } @@ -7260,11 +7274,11 @@ impl Tensor { self.f_cummin_out(values, indices, dim).unwrap() } - pub fn cumprod(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_cumprod(dim, dtype).unwrap() } - pub fn cumprod_(&mut self, dim: i64, dtype: Kind) -> Tensor { + pub fn cumprod_(&mut self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_cumprod_(dim, dtype).unwrap() } @@ -7272,19 +7286,19 @@ impl Tensor { self.f_cumprod_backward(grad, dim, output).unwrap() } - pub fn cumprod_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor { + pub fn cumprod_out(&self, out: &Tensor, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor { self.f_cumprod_out(out, dim, dtype).unwrap() } - pub 
fn cumsum(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn cumsum(&self, dim: i64, dtype: impl Into>) -> Tensor { self.f_cumsum(dim, dtype).unwrap() } - pub fn cumsum_(&mut self, dim: i64, dtype: Kind) -> Tensor { + pub fn cumsum_(&mut self, dim: i64, dtype: impl Into>) -> Tensor { self.f_cumsum_(dim, dtype).unwrap() } - pub fn cumsum_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor { + pub fn cumsum_out(&self, out: &Tensor, dim: i64, dtype: impl Into>) -> Tensor { self.f_cumsum_out(out, dim, dtype).unwrap() } @@ -10644,7 +10658,7 @@ impl Tensor { ord: S, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_linalg_norm(ord, dim, keepdim, dtype).unwrap() } @@ -10654,7 +10668,7 @@ impl Tensor { ord: &str, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_linalg_norm_ord_str(ord, dim, keepdim, dtype).unwrap() } @@ -10665,7 +10679,7 @@ impl Tensor { ord: &str, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_linalg_norm_ord_str_out(out, ord, dim, keepdim, dtype).unwrap() } @@ -10676,7 +10690,7 @@ impl Tensor { ord: S, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_linalg_norm_out(out, ord, dim, keepdim, dtype).unwrap() } @@ -10972,11 +10986,16 @@ impl Tensor { self.f_log_sigmoid_out(out).unwrap() } - pub fn log_softmax(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn log_softmax(&self, dim: i64, dtype: impl Into>) -> Tensor { self.f_log_softmax(dim, dtype).unwrap() } - pub fn log_softmax_int_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor { + pub fn log_softmax_int_out( + &self, + out: &Tensor, + dim: i64, + dtype: impl Into>, + ) -> Tensor { self.f_log_softmax_int_out(out, dim, dtype).unwrap() } @@ -11650,7 +11669,7 @@ impl Tensor { self.f_maximum_out(out, other).unwrap() } - pub fn mean(&self, dtype: Kind) -> Tensor { + pub fn mean(&self, dtype: impl Into>) -> Tensor { 
self.f_mean(dtype).unwrap() } @@ -11658,7 +11677,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_mean_dim(dim, keepdim, dtype).unwrap() } @@ -11668,7 +11687,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_mean_out(out, dim, keepdim, dtype).unwrap() } @@ -12863,7 +12882,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_nanmean(dim, keepdim, dtype).unwrap() } @@ -12873,7 +12892,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_nanmean_out(out, dim, keepdim, dtype).unwrap() } @@ -12946,7 +12965,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_nansum(dim, keepdim, dtype).unwrap() } @@ -12956,7 +12975,7 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_nansum_out(out, dim, keepdim, dtype).unwrap() } @@ -13115,7 +13134,7 @@ impl Tensor { p: S, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_native_norm_scalaropt_dim_dtype(p, dim, keepdim, dtype).unwrap() } @@ -13126,7 +13145,7 @@ impl Tensor { p: S, dim: &[i64], keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_native_norm_scalaropt_dim_dtype_out(out, p, dim, keepdim, dtype).unwrap() } @@ -13701,19 +13720,25 @@ impl Tensor { self.f_prelu(weight).unwrap() } - pub fn prod(&self, dtype: Kind) -> Tensor { + pub fn prod(&self, dtype: impl Into>) -> Tensor { self.f_prod(dtype).unwrap() } - pub fn prod_dim_int(&self, dim: i64, keepdim: bool, dtype: Kind) -> Tensor { + pub fn prod_dim_int(&self, dim: i64, keepdim: bool, dtype: impl Into>) -> Tensor { self.f_prod_dim_int(dim, keepdim, dtype).unwrap() } - pub fn prod_int_out(&self, out: &Tensor, dim: i64, keepdim: 
bool, dtype: Kind) -> Tensor { + pub fn prod_int_out( + &self, + out: &Tensor, + dim: i64, + keepdim: bool, + dtype: impl Into>, + ) -> Tensor { self.f_prod_int_out(out, dim, keepdim, dtype).unwrap() } - pub fn prod_out(&self, out: &Tensor, dtype: Kind) -> Tensor { + pub fn prod_out(&self, out: &Tensor, dtype: impl Into>) -> Tensor { self.f_prod_out(out, dtype).unwrap() } @@ -15574,11 +15599,16 @@ impl Tensor { self.f_soft_margin_loss_out(out, target, reduction).unwrap() } - pub fn softmax(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn softmax(&self, dim: i64, dtype: impl Into>) -> Tensor { self.f_softmax(dim, dtype).unwrap() } - pub fn softmax_int_out(&self, out: &Tensor, dim: i64, dtype: Kind) -> Tensor { + pub fn softmax_int_out( + &self, + out: &Tensor, + dim: i64, + dtype: impl Into>, + ) -> Tensor { self.f_softmax_int_out(out, dim, dtype).unwrap() } @@ -16316,7 +16346,7 @@ impl Tensor { self.f_special_log_ndtr_out(out).unwrap() } - pub fn special_log_softmax(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn special_log_softmax(&self, dim: i64, dtype: impl Into>) -> Tensor { self.f_special_log_softmax(dim, dtype).unwrap() } @@ -16608,7 +16638,7 @@ impl Tensor { self.f_special_sinc_out(out).unwrap() } - pub fn special_softmax(&self, dim: i64, dtype: Kind) -> Tensor { + pub fn special_softmax(&self, dim: i64, dtype: impl Into>) -> Tensor { self.f_special_softmax(dim, dtype).unwrap() } @@ -16992,7 +17022,7 @@ impl Tensor { self.f_subtract_scalar_(other).unwrap() } - pub fn sum(&self, dtype: Kind) -> Tensor { + pub fn sum(&self, dtype: impl Into>) -> Tensor { self.f_sum(dtype).unwrap() } @@ -17000,7 +17030,7 @@ impl Tensor { &self, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_sum_dim_intlist(dim, keepdim, dtype).unwrap() } @@ -17010,12 +17040,12 @@ impl Tensor { out: &Tensor, dim: impl Into>, keepdim: bool, - dtype: Kind, + dtype: impl Into>, ) -> Tensor { self.f_sum_intlist_out(out, dim, keepdim, dtype).unwrap() } 
- pub fn sum_out(&self, out: &Tensor, dtype: Kind) -> Tensor { + pub fn sum_out(&self, out: &Tensor, dtype: impl Into>) -> Tensor { self.f_sum_out(out, dtype).unwrap() } @@ -17194,7 +17224,7 @@ impl Tensor { self.f_to(device).unwrap() } - pub fn to_dense(&self, dtype: Kind) -> Tensor { + pub fn to_dense(&self, dtype: impl Into>) -> Tensor { self.f_to_dense(dtype).unwrap() } @@ -17225,7 +17255,7 @@ impl Tensor { self.f_to_dtype_layout(options, non_blocking, copy).unwrap() } - pub fn g_to_mkldnn(&self, dtype: Kind) -> Tensor { + pub fn g_to_mkldnn(&self, dtype: impl Into>) -> Tensor { self.f_to_mkldnn(dtype).unwrap() } @@ -17233,7 +17263,7 @@ impl Tensor { self.f_to_mkldnn_backward(grad).unwrap() } - pub fn to_mkldnn_out(&self, out: &Tensor, dtype: Kind) -> Tensor { + pub fn to_mkldnn_out(&self, out: &Tensor, dtype: impl Into>) -> Tensor { self.f_to_mkldnn_out(out, dtype).unwrap() } diff --git a/torch-sys/libtch/torch_api_generated.cpp.h b/torch-sys/libtch/torch_api_generated.cpp.h index 3a14fb5d..e72dbe44 100644 --- a/torch-sys/libtch/torch_api_generated.cpp.h +++ b/torch-sys/libtch/torch_api_generated.cpp.h @@ -343,7 +343,7 @@ void atg__amp_update_scale_out(tensor *out__, tensor out, tensor self, tensor gr void atg__assert_tensor_metadata(tensor a, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int dtype) { PROTECT( - torch::_assert_tensor_metadata(*a, size_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(size_data, size_len)), stride_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(stride_data, stride_len)), at::ScalarType(dtype)); + torch::_assert_tensor_metadata(*a, size_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(size_data, size_len)), stride_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(stride_data, stride_len)), dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); ) } @@ -2127,7 +2127,7 @@ void atg__slow_conv2d_backward(tensor *out__, tensor grad_input, tensor grad_wei void atg__sobol_engine_draw(tensor *out__, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype) { PROTECT( - auto outputs__ = torch::_sobol_engine_draw(*quasi, n, *sobolstate, dimension, num_generated, at::ScalarType(dtype)); + auto outputs__ = torch::_sobol_engine_draw(*quasi, n, *sobolstate, dimension, num_generated, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); ) @@ -2282,28 +2282,28 @@ void atg__sparse_csc_tensor_unsafe(tensor *out__, tensor ccol_indices, tensor ro void atg__sparse_csr_prod(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_csr_prod(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_csr_prod(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg__sparse_csr_prod_dim_dtype_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_csr_prod_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_csr_prod_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg__sparse_csr_sum(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_csr_sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_csr_sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg__sparse_csr_sum_dim_dtype_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_csr_sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_csr_sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -2338,7 +2338,7 @@ void atg__sparse_log_softmax_backward_data_out(tensor *out__, tensor out, tensor void atg__sparse_log_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_log_softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_log_softmax(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -2395,7 +2395,7 @@ void atg__sparse_softmax_backward_data_out(tensor *out__, tensor out, tensor gra void atg__sparse_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::_sparse_softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::_sparse_softmax(*self, dim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -2688,14 +2688,14 @@ tensor *atg__to_cpu(tensor *tensors_data, int tensors_len) { void atg__to_dense(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = self->_to_dense(at::ScalarType(dtype)); + auto outputs__ = self->_to_dense(dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg__to_dense_out(tensor *out__, tensor out, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::_to_dense_out(*out, *self, at::ScalarType(dtype)); + auto outputs__ = torch::_to_dense_out(*out, *self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -5991,14 +5991,14 @@ void atg_cummin_out(tensor *out__, tensor values, tensor indices, tensor self, i void atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::cumprod(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::cumprod(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_cumprod_(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = self->cumprod_(dim, at::ScalarType(dtype)); + auto outputs__ = self->cumprod_(dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -6012,28 +6012,28 @@ void atg_cumprod_backward(tensor *out__, tensor grad, tensor input, int64_t dim, void atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::cumprod_out(*out, *self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::cumprod_out(*out, *self, dim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::cumsum(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::cumsum(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_cumsum_(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = self->cumsum_(dim, at::ScalarType(dtype)); + auto outputs__ = self->cumsum_(dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::cumsum_out(*out, *self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::cumsum_out(*out, *self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -10038,28 +10038,28 @@ void atg_linalg_multi_dot_out(tensor *out__, tensor out, tensor *tensors_data, i void atg_linalg_norm(tensor *out__, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::linalg_norm(*self, *ord, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::linalg_norm(*self, *ord, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_linalg_norm_ord_str(tensor *out__, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::linalg_norm(*self, std::string(ord_ptr, ord_len), dim_data == nullptr ? 
c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::linalg_norm(*self, std::string(ord_ptr, ord_len), dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_linalg_norm_ord_str_out(tensor *out__, tensor out, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::linalg_norm_out(*out, *self, std::string(ord_ptr, ord_len), dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::linalg_norm_out(*out, *self, std::string(ord_ptr, ord_len), dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_linalg_norm_out(tensor *out__, tensor out, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::linalg_norm_out(*out, *self, *ord, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::linalg_norm_out(*out, *self, *ord, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -10440,14 +10440,14 @@ void atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) { void atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::log_softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::log_softmax(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_log_softmax_int_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::log_softmax_out(*out, *self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::log_softmax_out(*out, *self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -11085,21 +11085,21 @@ void atg_maximum_out(tensor *out__, tensor out, tensor self, tensor other) { void atg_mean(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::mean(*self, at::ScalarType(dtype)); + auto outputs__ = torch::mean(*self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::mean(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::mean(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::mean_out(*out, *self, dim_data == nullptr ? 
c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::mean_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -11874,14 +11874,14 @@ void atg_nan_to_num_out(tensor *out__, tensor out, tensor self, double nan_v, ui void atg_nanmean(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::nanmean(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::nanmean(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_nanmean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::nanmean_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::nanmean_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -11946,14 +11946,14 @@ void atg_nanquantile_scalar_out(tensor *out__, tensor out, tensor self, double q void atg_nansum(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::nansum(*self, dim_data == nullptr ? 
c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::nansum(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_nansum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::nansum_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::nansum_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -12093,14 +12093,14 @@ void atg_native_norm_out(tensor *out__, tensor out, tensor self) { void atg_native_norm_scalaropt_dim_dtype(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::native_norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::native_norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_native_norm_scalaropt_dim_dtype_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::native_norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::native_norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -12834,28 +12834,28 @@ void atg_prelu(tensor *out__, tensor self, tensor weight) { void atg_prod(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::prod(*self, at::ScalarType(dtype)); + auto outputs__ = torch::prod(*self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_prod_dim_int(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::prod(*self, dim, (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::prod(*self, dim, (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_prod_int_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_prod_out(tensor *out__, tensor out, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::prod_out(*out, *self, at::ScalarType(dtype)); + auto outputs__ = torch::prod_out(*out, *self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -14751,14 +14751,14 @@ void atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor tar void atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::softmax(*self, dim, dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_softmax_int_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::softmax_out(*out, *self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::softmax_out(*out, *self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -15686,7 +15686,7 @@ void atg_special_log_ndtr_out(tensor *out__, tensor out, tensor self) { void atg_special_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::special_log_softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::special_log_softmax(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -16071,7 +16071,7 @@ void atg_special_sinc_out(tensor *out__, tensor out, tensor self) { void atg_special_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::special_softmax(*self, dim, at::ScalarType(dtype)); + auto outputs__ = torch::special_softmax(*self, dim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -16607,28 +16607,28 @@ void atg_subtract_scalar_(tensor *out__, tensor self, scalar other) { void atg_sum(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::sum(*self, at::ScalarType(dtype)); + auto outputs__ = torch::sum(*self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_sum_dim_intlist(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::sum(*self, dim_data == nullptr ? 
c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::sum(*self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_sum_intlist_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( - auto outputs__ = torch::sum_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, at::ScalarType(dtype)); + auto outputs__ = torch::sum_out(*out, *self, dim_data == nullptr ? c10::nullopt : c10::optional(torch::IntArrayRef(dim_data, dim_len)), (bool)keepdim, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } void atg_sum_out(tensor *out__, tensor out, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::sum_out(*out, *self, at::ScalarType(dtype)); + auto outputs__ = torch::sum_out(*out, *self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -16902,7 +16902,7 @@ void atg_to(tensor *out__, tensor self, int device) { void atg_to_dense(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = self->to_dense(at::ScalarType(dtype)); + auto outputs__ = self->to_dense(dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -16937,7 +16937,7 @@ void atg_to_dtype_layout(tensor *out__, tensor self, int options_kind, int optio void atg_to_mkldnn(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = self->to_mkldnn(at::ScalarType(dtype)); + auto outputs__ = self->to_mkldnn(dtype < 0 ? 
c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) } @@ -16951,7 +16951,7 @@ void atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) { void atg_to_mkldnn_out(tensor *out__, tensor out, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::to_mkldnn_out(*out, *self, at::ScalarType(dtype)); + auto outputs__ = torch::to_mkldnn_out(*out, *self, dtype < 0 ? c10::nullopt : c10::optional(at::ScalarType(dtype))); out__[0] = new torch::Tensor(outputs__); ) }