runtime: allocate map element zero values for reflect-created types on demand

Preallocating them in reflect means that
(1) if you say _ = PtrTo(ArrayOf(1000000000, reflect.TypeOf(byte(0)))), you just allocated 1GB of data
(2) if you say it again, that's *another* GB of data.

The only use of t.zero in the runtime is for map elements.
Delay the allocation until the creation of a map with that element type,
and share the zeros.

The one downside of the shared zero is that it's not garbage collected,
but it's also never written, so the OS should be able to handle it fairly
efficiently.

Change-Id: I56b098a091abf3ac0945de28ebef9a6c08e76614
Reviewed-on: https://go-review.googlesource.com/10111
Reviewed-by: Keith Randall <[email protected]>
rsc committed May 15, 2015
1 parent 65c4d7b commit 7e26a2d
Showing 2 changed files with 48 additions and 9 deletions.
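Before the diffs, a minimal runnable sketch of the scenario the commit message describes: constructing the reflect types below no longer allocates their zero values, so heap growth stays near zero. The ReadMemStats measurement is approximate and the exact numbers depend on the Go version; this is an illustration, not part of the change.

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

func heapAlloc() uint64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	return ms.HeapAlloc
}

func main() {
	before := heapAlloc()

	for i := 0; i < 2; i++ {
		// Per the commit message, each of these used to allocate a
		// 1GB zero value for the array type; now it allocates none.
		_ = reflect.PtrTo(reflect.ArrayOf(1000000000, reflect.TypeOf(byte(0))))
	}

	growthKB := (int64(heapAlloc()) - int64(before)) / 1024
	fmt.Printf("approximate heap growth: %d KB\n", growthKB)
}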
9 changes: 0 additions & 9 deletions src/reflect/type.go
@@ -1087,7 +1087,6 @@ func (t *rtype) ptrTo() *rtype {
 
 	p.uncommonType = nil
 	p.ptrToThis = nil
-	p.zero = unsafe.Pointer(&make([]byte, p.size)[0])
 	p.elem = t
 
 	ptrMap.m[t] = p
@@ -1467,7 +1466,6 @@ func ChanOf(dir ChanDir, t Type) Type {
 	ch.elem = typ
 	ch.uncommonType = nil
 	ch.ptrToThis = nil
-	ch.zero = unsafe.Pointer(&make([]byte, ch.size)[0])
 
 	return cachePut(ckey, &ch.rtype)
 }
@@ -1530,7 +1528,6 @@ func MapOf(key, elem Type) Type {
 	mt.reflexivekey = isReflexive(ktyp)
 	mt.uncommonType = nil
 	mt.ptrToThis = nil
-	mt.zero = unsafe.Pointer(&make([]byte, mt.size)[0])
 
 	return cachePut(ckey, &mt.rtype)
 }
@@ -1610,7 +1607,6 @@ func FuncOf(in, out []Type, variadic bool) Type {
 	ft.string = &str
 	ft.uncommonType = nil
 	ft.ptrToThis = nil
-	ft.zero = unsafe.Pointer(&make([]byte, ft.size)[0])
 	funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
 
 	return ft
@@ -1857,7 +1853,6 @@ func SliceOf(t Type) Type {
 	slice.elem = typ
 	slice.uncommonType = nil
 	slice.ptrToThis = nil
-	slice.zero = unsafe.Pointer(&make([]byte, slice.size)[0])
 
 	return cachePut(ckey, &slice.rtype)
 }
@@ -1913,10 +1908,6 @@ func ArrayOf(count int, elem Type) Type {
 	array.fieldAlign = typ.fieldAlign
 	array.uncommonType = nil
 	array.ptrToThis = nil
-	if array.size > 0 {
-		zero := make([]byte, array.size)
-		array.zero = unsafe.Pointer(&zero[0])
-	}
 	array.len = uintptr(count)
 	array.slice = slice.(*rtype)
 
48 changes: 48 additions & 0 deletions src/runtime/hashmap.go
@@ -233,6 +233,9 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 		throw("need padding in bucket (value)")
 	}
 
+	// make sure zero of element type is available.
+	mapzero(t.elem)
+
 	// find size parameter which will hold the requested # of elements
 	B := uint8(0)
 	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
@@ -990,3 +993,48 @@ func reflect_maplen(h *hmap) int {
 func reflect_ismapkey(t *_type) bool {
 	return ismapkey(t)
 }
+
+var zerobuf struct {
+	lock mutex
+	p    *byte
+	size uintptr
+}
+
+var zerotiny [1024]byte
+
+// mapzero ensures that t.zero points at a zero value for type t.
+// Types known to the compiler are in read-only memory and all point
+// to a single zero in the bss of a large enough size.
+// Types allocated by package reflect are in writable memory and
+// start out with zero set to nil; we initialize those on demand.
+func mapzero(t *_type) {
+	// Already done?
+	// Check without lock, so must use atomicload to sync with atomicstore in allocation case below.
+	if atomicloadp(unsafe.Pointer(&t.zero)) != nil {
+		return
+	}
+
+	// Small enough for static buffer?
+	if t.size <= uintptr(len(zerotiny)) {
+		atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(&zerotiny[0]))
+		return
+	}
+
+	// Use allocated buffer.
+	lock(&zerobuf.lock)
+	if zerobuf.size < t.size {
+		if zerobuf.size == 0 {
+			zerobuf.size = 4 * 1024
+		}
+		for zerobuf.size < t.size {
+			zerobuf.size *= 2
+			if zerobuf.size == 0 {
+				// need >2GB zero on 32-bit machine
+				throw("map element too large")
+			}
+		}
+		zerobuf.p = (*byte)(persistentalloc(zerobuf.size, 64, &memstats.other_sys))
+	}
+	atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(zerobuf.p))
+	unlock(&zerobuf.lock)
+}
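The sizing policy in mapzero can be read in isolation: element types up to len(zerotiny) = 1024 bytes point at the static zerotiny array, while anything larger shares a single buffer that starts at 4KB and doubles until it fits (the overflow check catches >2GB elements on 32-bit machines). The following standalone sketch illustrates only that policy; the helper sharedZeroSize is hypothetical and stands in for the locked update inside mapzero.

package main

import "fmt"

// sharedZeroSize mimics mapzero's sizing decision: it reports whether an
// element of size elemSize is served by the static 1KB buffer, and
// otherwise how large the shared dynamic buffer must grow to cover it.
func sharedZeroSize(bufSize, elemSize uintptr) (newBufSize uintptr, static bool) {
	const zerotinySize = 1024
	if elemSize <= zerotinySize {
		return bufSize, true // served by the static zerotiny array
	}
	if bufSize < elemSize {
		if bufSize == 0 {
			bufSize = 4 * 1024
		}
		for bufSize < elemSize {
			bufSize *= 2 // wrapping to 0 here would mean a >2GB element on 32-bit
		}
	}
	return bufSize, false
}

func main() {
	var buf uintptr
	for _, size := range []uintptr{16, 1024, 1025, 100000, 3000000} {
		var static bool
		buf, static = sharedZeroSize(buf, size)
		fmt.Printf("elem %7d bytes: static=%v, shared buffer=%d bytes\n", size, static, buf)
	}
}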
