From 10e81ce1ad317188ed3772a1474fa40ae7fe1f52 Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Thu, 25 Oct 2018 00:50:16 -0400
Subject: [PATCH] fix promote_op function (#29739)

This function had some weird and broken extraneous code. Using
Core.Inference directly is not recommended, but where we are going to do
it anyway, we should at least do it correctly.

The `cumsum` code had a test asserting that `Real + Real` is convertible
to `Real`. That requirement was previously hacked into `promote_op`
poorly; it is now hacked in directly, via a `::Real` return annotation on
the reduction operators. We can separately debate whether this is a good
idea; here I am simply preserving the existing behavior of the code, but
implementing it correctly (by changing the behavior of the actual
function instead of by convincing inference to return the incorrect
answer).
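
Roughly, and only as an illustration (this sketch is not part of the patch;
the `sketch_*` names are hypothetical stand-ins for the definitions in the
diff below):

```julia
# The new promote_op boils down to a thin wrapper around inference.
sketch_promote_op(f, S::Type...) = Core.Compiler.return_type(f, Tuple{S...})

sketch_promote_op(+, Int, Float64)  # Float64, straight from inference

# The `Real + Real` -> `Real` guarantee that `cumsum` relied on now lives in
# the reduction operator itself, not in promote_op (see base/reduce.jl below).
sketch_add_sum(x::Real, y::Real)::Real = x + y
sketch_promote_op(sketch_add_sum, Real, Real)  # should infer as Real, due to the `::Real` annotation
```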
---
 base/complex.jl                      |  4 ----
 base/float.jl                        |  2 --
 base/int.jl                          |  3 ---
 base/intfuncs.jl                     |  2 +-
 base/number.jl                       |  2 --
 base/promotion.jl                    | 24 +-----------------------
 base/reduce.jl                       | 20 +++++++++++---------
 stdlib/LinearAlgebra/src/adjtrans.jl |  2 +-
 8 files changed, 14 insertions(+), 45 deletions(-)

diff --git a/base/complex.jl b/base/complex.jl
index b4d76f3c8699e..ea66c211c8058 100644
--- a/base/complex.jl
+++ b/base/complex.jl
@@ -991,7 +991,3 @@ function complex(A::AbstractArray{T}) where T
     end
     convert(AbstractArray{typeof(complex(zero(T)))}, A)
 end
-
-## promotion to complex ##
-
-_default_type(T::Type{Complex}) = Complex{Int}
diff --git a/base/float.jl b/base/float.jl
index d4b0b97af19d4..3a58d4c7cf2e2 100644
--- a/base/float.jl
+++ b/base/float.jl
@@ -381,8 +381,6 @@ promote_rule(::Type{Float64}, ::Type{Float32}) = Float64
 widen(::Type{Float16}) = Float32
 widen(::Type{Float32}) = Float64
 
-_default_type(T::Union{Type{Real},Type{AbstractFloat}}) = Float64
-
 ## floating point arithmetic ##
 -(x::Float64) = neg_float(x)
 -(x::Float32) = neg_float(x)
diff --git a/base/int.jl b/base/int.jl
index 718542e0e13ea..e786e1c37f870 100644
--- a/base/int.jl
+++ b/base/int.jl
@@ -632,9 +632,6 @@ promote_rule(::Type{UInt32}, ::Type{Int32} ) = UInt32
 promote_rule(::Type{UInt64}, ::Type{Int64} ) = UInt64
 promote_rule(::Type{UInt128}, ::Type{Int128}) = UInt128
 
-_default_type(::Type{Unsigned}) = UInt
-_default_type(::Union{Type{Integer},Type{Signed}}) = Int
-
 ## traits ##
 
 """
diff --git a/base/intfuncs.jl b/base/intfuncs.jl
index 8b9f3d441ec01..615abf7c38e7c 100644
--- a/base/intfuncs.jl
+++ b/base/intfuncs.jl
@@ -169,7 +169,7 @@ end
 invmod(n::Integer, m::Integer) = invmod(promote(n,m)...)
 
 # ^ for any x supporting *
-to_power_type(x) = convert(promote_op(*, typeof(x), typeof(x)), x)
+to_power_type(x) = convert(Base._return_type(*, Tuple{typeof(x), typeof(x)}), x)
 @noinline throw_domerr_powbysq(::Any, p) = throw(DomainError(p,
     string("Cannot raise an integer x to a negative power ", p, '.',
            "\nConvert input to float.")))
diff --git a/base/number.jl b/base/number.jl
index 8b819bd5739ed..a51137600cc93 100644
--- a/base/number.jl
+++ b/base/number.jl
@@ -320,8 +320,6 @@ julia> import Dates; oneunit(Dates.Day)
 oneunit(x::T) where {T} = T(one(x))
 oneunit(::Type{T}) where {T} = T(one(T))
 
-_default_type(::Type{Number}) = Int
-
 """
     big(T::Type)
 
diff --git a/base/promotion.jl b/base/promotion.jl
index 319284beaaaf9..4eb80c1b68239 100644
--- a/base/promotion.jl
+++ b/base/promotion.jl
@@ -364,11 +364,6 @@ max(x::Real, y::Real) = max(promote(x,y)...)
 min(x::Real, y::Real) = min(promote(x,y)...)
 minmax(x::Real, y::Real) = minmax(promote(x, y)...)
 
-# "Promotion" that takes a function into account and tries to preserve
-# non-concrete types. These are meant to be used mainly by elementwise
-# operations, so it is advised against overriding them
-_default_type(T::Type) = T
-
 if isdefined(Core, :Compiler)
     const _return_type = Core.Compiler.return_type
 else
@@ -381,29 +376,12 @@ end
 Guess what an appropriate container eltype would be for storing results of
 `f(::argtypes...)`. The guess is in part based on type inference, so can change any time.
 
-!!! warning
-    In pathological cases, the type returned by `promote_op(f, argtypes...)` may not even
-    be a supertype of the return value of `f(::argtypes...)`. Therefore, `promote_op`
-    should _not_ be used e.g. in the preallocation of an output array.
-
 !!! warning
     Due to its fragility, use of `promote_op` should be avoided. It is preferable to base
     the container eltype on the type of the actual elements. Only in the absence of any
     elements (for an empty result container), it may be unavoidable to call `promote_op`.
 """
-promote_op(::Any...) = Any
-function promote_op(f, ::Type{S}) where S
-    TT = Tuple{_default_type(S)}
-    T = _return_type(f, TT)
-    isdispatchtuple(Tuple{S}) && return isdispatchtuple(Tuple{T}) ? T : Any
-    return typejoin(S, T)
-end
-function promote_op(f, ::Type{R}, ::Type{S}) where {R,S}
-    TT = Tuple{_default_type(R), _default_type(S)}
-    T = _return_type(f, TT)
-    isdispatchtuple(Tuple{R}) && isdispatchtuple(Tuple{S}) && return isdispatchtuple(Tuple{T}) ? T : Any
-    return typejoin(R, S, T)
-end
+promote_op(f, S::Type...) = _return_type(f, Tuple{S...})
 
 ## catch-alls to prevent infinite recursion when definitions are missing ##
diff --git a/base/reduce.jl b/base/reduce.jl
index 5e048138990c7..0fbcc5a8a2a45 100644
--- a/base/reduce.jl
+++ b/base/reduce.jl
@@ -13,24 +13,26 @@ else
 end
 
 """
-    Base.add_sum(x,y)
+    Base.add_sum(x, y)
 
 The reduction operator used in `sum`. The main difference from [`+`](@ref) is that small
 integers are promoted to `Int`/`UInt`.
 """
-add_sum(x,y) = x + y
-add_sum(x::SmallSigned,y::SmallSigned) = Int(x) + Int(y)
-add_sum(x::SmallUnsigned,y::SmallUnsigned) = UInt(x) + UInt(y)
+add_sum(x, y) = x + y
+add_sum(x::SmallSigned, y::SmallSigned) = Int(x) + Int(y)
+add_sum(x::SmallUnsigned, y::SmallUnsigned) = UInt(x) + UInt(y)
+add_sum(x::Real, y::Real)::Real = x + y
 
 """
-    Base.mul_prod(x,y)
+    Base.mul_prod(x, y)
 
 The reduction operator used in `prod`. The main difference from [`*`](@ref) is that small
 integers are promoted to `Int`/`UInt`.
 """
-mul_prod(x,y) = x * y
-mul_prod(x::SmallSigned,y::SmallSigned) = Int(x) * Int(y)
-mul_prod(x::SmallUnsigned,y::SmallUnsigned) = UInt(x) * UInt(y)
+mul_prod(x, y) = x * y
+mul_prod(x::SmallSigned, y::SmallSigned) = Int(x) * Int(y)
+mul_prod(x::SmallUnsigned, y::SmallUnsigned) = UInt(x) * UInt(y)
+mul_prod(x::Real, y::Real)::Real = x * y
 
 ## foldl && mapfoldl
 
@@ -56,7 +58,7 @@ function mapfoldl_impl(f, op, nt::NamedTuple{()}, itr)
     end
     (x, i) = y
     init = mapreduce_first(f, op, x)
-    mapfoldl_impl(f, op, (init=init,), itr, i)
+    return mapfoldl_impl(f, op, (init=init,), itr, i)
 end
 
 
diff --git a/stdlib/LinearAlgebra/src/adjtrans.jl b/stdlib/LinearAlgebra/src/adjtrans.jl
index 208c2f884c563..8ded77a7eda74 100644
--- a/stdlib/LinearAlgebra/src/adjtrans.jl
+++ b/stdlib/LinearAlgebra/src/adjtrans.jl
@@ -1,6 +1,6 @@
 # This file is a part of Julia. License is MIT: https://julialang.org/license
 
-using Base: @propagate_inbounds, _return_type, _default_type, @_inline_meta
+using Base: @propagate_inbounds, @_inline_meta
 import Base: length, size, axes, IndexStyle, getindex, setindex!, parent, vec, convert, similar
 
 ### basic definitions (types, aliases, constructors, abstractarray interface, sundry similar)