math/big: more cleanups (msbxx, nlzxx functions)
Change-Id: Ibace718452b6dc029c5af5240117f5fc794c38cf
Reviewed-on: https://go-review.googlesource.com/10388
Reviewed-by: Alan Donovan <[email protected]>
griesemer committed May 27, 2015
1 parent 0858d88 commit 635cd91
Showing 4 changed files with 61 additions and 46 deletions.
21 changes: 18 additions & 3 deletions src/math/big/arith.go
@@ -107,19 +107,34 @@ func log2(x Word) int {
 	return bitLen(x) - 1
 }
 
-// Number of leading zeros in x.
-func leadingZeros(x Word) uint {
+// nlz returns the number of leading zeros in x.
+func nlz(x Word) uint {
 	return uint(_W - bitLen(x))
 }
 
+// nlz64 returns the number of leading zeros in x.
+func nlz64(x uint64) uint {
+	switch _W {
+	case 32:
+		w := x >> 32
+		if w == 0 {
+			return 32 + nlz(Word(x))
+		}
+		return nlz(Word(w))
+	case 64:
+		return nlz(Word(x))
+	}
+	panic("unreachable")
+}
+
 // q = (u1<<_W + u0 - r)/y
 // Adapted from Warren, Hacker's Delight, p. 152.
 func divWW_g(u1, u0, v Word) (q, r Word) {
 	if u1 >= v {
 		return 1<<_W - 1, 1<<_W - 1
 	}
 
-	s := leadingZeros(v)
+	s := nlz(v)
 	v <<= s
 
 	vn1 := v >> _W2
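
An aside for readers following along: the sketch below mirrors the nlz/nlz64 logic added above as a standalone program. It fixes _W at 64 (inside math/big it is the platform word size) and uses a local bitLen in place of the package's unexported helper; everything here is illustrative, not package code.

package main

import "fmt"

const _W = 64 // assumed word size for this sketch

// bitLen returns the minimum number of bits needed to represent x;
// bitLen(0) == 0.
func bitLen(x uint64) (n int) {
	for ; x >= 0x8000; x >>= 16 {
		n += 16
	}
	for ; x > 0; x >>= 1 {
		n++
	}
	return
}

// nlz returns the number of leading zeros in x (mirrors arith.go).
func nlz(x uint64) uint {
	return uint(_W - bitLen(x))
}

func main() {
	fmt.Println(nlz(0))       // 64
	fmt.Println(nlz(1))       // 63
	fmt.Println(nlz(1 << 63)) // 0
}
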
76 changes: 38 additions & 38 deletions src/math/big/float.go
@@ -525,25 +525,6 @@ func (z *Float) round(sbit uint) {
 	return
 }
 
-// nlz returns the number of leading zero bits in x.
-func nlz(x Word) uint {
-	return _W - uint(bitLen(x))
-}
-
-func nlz64(x uint64) uint {
-	// TODO(gri) this can be done more nicely
-	if _W == 32 {
-		if x>>32 == 0 {
-			return 32 + nlz(Word(x))
-		}
-		return nlz(Word(x >> 32))
-	}
-	if _W == 64 {
-		return nlz(Word(x))
-	}
-	panic("unreachable")
-}
-
 func (z *Float) setBits64(neg bool, x uint64) *Float {
 	if z.prec == 0 {
 		z.prec = 64
@@ -732,25 +713,44 @@ func (z *Float) Copy(x *Float) *Float {
 	return z
 }
 
-func high32(x nat) uint32 {
-	// TODO(gri) This can be done more efficiently on 32bit platforms.
-	return uint32(high64(x) >> 32)
+// msb32 returns the 32 most significant bits of x.
+func msb32(x nat) uint32 {
+	i := len(x) - 1
+	if i < 0 {
+		return 0
+	}
+	if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+		panic("x not normalized")
+	}
+	switch _W {
+	case 32:
+		return uint32(x[i])
+	case 64:
+		return uint32(x[i] >> 32)
+	}
+	panic("unreachable")
 }
 
-func high64(x nat) uint64 {
-	i := len(x)
-	if i == 0 {
+// msb64 returns the 64 most significant bits of x.
+func msb64(x nat) uint64 {
+	i := len(x) - 1
+	if i < 0 {
 		return 0
 	}
-	// i > 0
-	v := uint64(x[i-1])
-	if _W == 32 {
-		v <<= 32
-		if i > 1 {
-			v |= uint64(x[i-2])
+	if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+		panic("x not normalized")
+	}
+	switch _W {
+	case 32:
+		v := uint64(x[i]) << 32
+		if i > 0 {
+			v |= uint64(x[i-1])
 		}
+		return v
+	case 64:
+		return uint64(x[i])
 	}
-	return v
+	panic("unreachable")
 }
 
 // Uint64 returns the unsigned integer resulting from truncating x
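
For illustration, here is how the new msb64 behaves on a 32-bit word size, as a self-contained sketch. The Word and nat definitions below are local stand-ins for the real types in math/big, and the input value is made up.

package main

import "fmt"

type Word uint32 // pretend 32-bit platform

type nat []Word // little-endian digits, most significant word last

// msb64 mirrors the function above for the _W == 32 case: it packs
// the top two words into the 64 most significant bits.
func msb64(x nat) uint64 {
	i := len(x) - 1
	if i < 0 {
		return 0
	}
	v := uint64(x[i]) << 32
	if i > 0 {
		v |= uint64(x[i-1])
	}
	return v
}

func main() {
	// Normalized mantissa 0x80000001_00000002 stored as two 32-bit words.
	x := nat{0x00000002, 0x80000001}
	fmt.Printf("%#x\n", msb64(x)) // 0x8000000100000002
}
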
@@ -776,7 +776,7 @@ func (x *Float) Uint64() (uint64, Accuracy) {
 	// 1 <= x < Inf
 	if x.exp <= 64 {
 		// u = trunc(x) fits into a uint64
-		u := high64(x.mant) >> (64 - uint32(x.exp))
+		u := msb64(x.mant) >> (64 - uint32(x.exp))
 		if x.MinPrec() <= 64 {
 			return u, Exact
 		}
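
The shift works because a Float mantissa is stored as a fraction in [0.5, 1) scaled by 2^x.exp, so dropping the low 64-exp bits of the top 64 mantissa bits yields trunc(x). A worked example with made-up constants:

package main

import "fmt"

func main() {
	// x = 5 = 0.101 (binary) * 2^3: the mantissa's top 64 bits are
	// 101 followed by zeros; shifting right by 64-exp recovers the
	// integer part.
	const exp = 3
	mant64 := uint64(5) << 61 // binary 101 in the top three bits
	fmt.Println(mant64 >> (64 - exp)) // 5
}
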
@@ -821,7 +821,7 @@ func (x *Float) Int64() (int64, Accuracy) {
 	// 1 <= |x| < +Inf
 	if x.exp <= 63 {
 		// i = trunc(x) fits into an int64 (excluding math.MinInt64)
-		i := int64(high64(x.mant) >> (64 - uint32(x.exp)))
+		i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
 		if x.neg {
 			i = -i
 		}
@@ -934,11 +934,11 @@ func (x *Float) Float32() (float32, Accuracy) {
 			return 0.0, Below
 		}
 		// bexp = 0
-		mant = high32(r.mant) >> (fbits - r.prec)
+		mant = msb32(r.mant) >> (fbits - r.prec)
 	} else {
 		// normal number: emin <= e <= emax
 		bexp = uint32(e+bias) << mbits
-		mant = high32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+		mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
 	}
 
 	return math.Float32frombits(sign | bexp | mant), r.acc
@@ -1041,11 +1041,11 @@ func (x *Float) Float64() (float64, Accuracy) {
 			return 0.0, Below
 		}
 		// bexp = 0
-		mant = high64(r.mant) >> (fbits - r.prec)
+		mant = msb64(r.mant) >> (fbits - r.prec)
 	} else {
 		// normal number: emin <= e <= emax
 		bexp = uint64(e+bias) << mbits
-		mant = high64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+		mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
 	}
 
 	return math.Float64frombits(sign | bexp | mant), r.acc
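
The final sign | bexp | mant assembly is the standard IEEE-754 encoding. A minimal sketch for the float64 case, assuming the usual 1 sign / 11 exponent / 52 mantissa bit layout (the constants below are the standard ones, not taken from this diff):

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		mbits = 52   // mantissa bits in a float64
		bias  = 1023 // exponent bias
	)
	var (
		sign uint64                     // 0 for positive
		bexp = uint64(0+bias) << mbits  // unbiased exponent e = 0
		mant = uint64(1) << (mbits - 1) // fraction 0.5; with the implicit 1 bit: 1.1 (binary)
	)
	fmt.Println(math.Float64frombits(sign | bexp | mant)) // 1.5
}
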
4 changes: 2 additions & 2 deletions src/math/big/nat.go
@@ -572,7 +572,7 @@ func (z nat) divLarge(u, uIn, v nat) (q, r nat) {
 	u.clear() // TODO(gri) no need to clear if we allocated a new u
 
 	// D1.
-	shift := leadingZeros(v[n-1])
+	shift := nlz(v[n-1])
 	if shift > 0 {
 		// do not modify v, it may be used by another goroutine simultaneously
 		v1 := make(nat, n)
@@ -942,7 +942,7 @@ func (z nat) expNN(x, y, m nat) nat {
 	}
 
 	v := y[len(y)-1] // v > 0 because y is normalized and y > 0
-	shift := leadingZeros(v) + 1
+	shift := nlz(v) + 1
 	v <<= shift
 	var q nat
 
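
In divLarge, nlz drives the normalization step of Knuth's Algorithm D (TAOCP vol. 2, step D1): the divisor is shifted left until the top bit of its leading word is set, which keeps the per-digit quotient estimates within a small bound of the true digit. A toy illustration with a 16-bit word; nlz16 is an assumption of this sketch, not package code.

package main

import "fmt"

// nlz16 counts leading zeros in a 16-bit word; it is a stand-in for
// the package's nlz, which works on platform-sized Words.
func nlz16(x uint16) (n uint) {
	for ; n < 16 && x&0x8000 == 0; x <<= 1 {
		n++
	}
	return
}

func main() {
	v := uint16(0x0123)
	shift := nlz16(v)
	fmt.Printf("shift = %d, normalized v = %#x\n", shift, v<<shift)
	// shift = 7, normalized v = 0x9180 (top bit now set)
}
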
6 changes: 3 additions & 3 deletions src/math/big/nat_test.go
@@ -205,11 +205,11 @@ func BenchmarkMul(b *testing.B) {
 	}
 }
 
-func TestLeadingZeros(t *testing.T) {
+func TestNLZ(t *testing.T) {
 	var x Word = _B >> 1
 	for i := 0; i <= _W; i++ {
-		if int(leadingZeros(x)) != i {
-			t.Errorf("failed at %x: got %d want %d", x, leadingZeros(x), i)
+		if int(nlz(x)) != i {
+			t.Errorf("failed at %x: got %d want %d", x, nlz(x), i)
 		}
 		x >>= 1
 	}
