Skip to content

Commit

Permalink
fix: add gc_write_barrier checks (bytedance#131)
Browse files Browse the repository at this point in the history
* fix: add gcWriteBarrier for decode

* add gcWriteBarrier for encoder


Co-authored-by: duanyi.aster <[email protected]>
  • Loading branch information
AsterDY and AsterDY authored Nov 17, 2021
1 parent ba4c2d2 commit 43e4a00
Show file tree
Hide file tree
Showing 17 changed files with 308 additions and 101 deletions.
50 changes: 25 additions & 25 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,31 +26,31 @@ For **all sizes** of json and **all cases** of usage, **Sonic performs best**.
goos: darwin
goarch: amd64
cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz
BenchmarkEncoder_Generic_Sonic-16 100000 25911 ns/op 503.06 MB/s 13542 B/op 4 allocs/op
BenchmarkEncoder_Generic_JsonIter-16 100000 46693 ns/op 279.16 MB/s 13434 B/op 77 allocs/op
BenchmarkEncoder_Generic_StdLib-16 100000 143080 ns/op 91.10 MB/s 48177 B/op 827 allocs/op
BenchmarkEncoder_Binding_Sonic-16 100000 6851 ns/op 1902.68 MB/s 14229 B/op 4 allocs/op
BenchmarkEncoder_Binding_JsonIter-16 100000 22264 ns/op 585.49 MB/s 9488 B/op 2 allocs/op
BenchmarkEncoder_Binding_StdLib-16 100000 18685 ns/op 697.61 MB/s 9479 B/op 1 allocs/op
BenchmarkEncoder_Parallel_Generic_Sonic-16 100000 4981 ns/op 2617.14 MB/s 10747 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Generic_JsonIter-16 100000 11225 ns/op 1161.24 MB/s 13447 B/op 77 allocs/op
BenchmarkEncoder_Parallel_Generic_StdLib-16 100000 55846 ns/op 233.41 MB/s 48215 B/op 827 allocs/op
BenchmarkEncoder_Parallel_Binding_Sonic-16 100000 1767 ns/op 7375.09 MB/s 11514 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Binding_JsonIter-16 100000 4904 ns/op 2657.84 MB/s 9487 B/op 2 allocs/op
BenchmarkEncoder_Parallel_Binding_StdLib-16 100000 3958 ns/op 3293.18 MB/s 9477 B/op 1 allocs/op
BenchmarkDecoder_Generic_Sonic-16 100000 55680 ns/op 234.11 MB/s 49755 B/op 313 allocs/op
BenchmarkDecoder_Generic_StdLib-16 100000 144991 ns/op 89.90 MB/s 50897 B/op 772 allocs/op
BenchmarkDecoder_Generic_JsonIter-16 100000 103197 ns/op 126.31 MB/s 55786 B/op 1068 allocs/op
BenchmarkDecoder_Binding_Sonic-16 100000 28399 ns/op 458.99 MB/s 24984 B/op 34 allocs/op
BenchmarkDecoder_Binding_StdLib-16 100000 132178 ns/op 98.62 MB/s 10560 B/op 207 allocs/op
BenchmarkDecoder_Binding_JsonIter-16 100000 39963 ns/op 326.18 MB/s 14674 B/op 385 allocs/op
BenchmarkDecoder_Parallel_Generic_Sonic-16 100000 10999 ns/op 1185.11 MB/s 49658 B/op 313 allocs/op
BenchmarkDecoder_Parallel_Generic_StdLib-16 100000 67083 ns/op 194.31 MB/s 50907 B/op 772 allocs/op
BenchmarkDecoder_Parallel_Generic_JsonIter-16 100000 54292 ns/op 240.09 MB/s 55809 B/op 1068 allocs/op
BenchmarkDecoder_Parallel_Binding_Sonic-16 100000 5699 ns/op 2287.37 MB/s 24968 B/op 34 allocs/op
BenchmarkDecoder_Parallel_Binding_StdLib-16 100000 35801 ns/op 364.09 MB/s 10559 B/op 207 allocs/op
BenchmarkDecoder_Parallel_Binding_JsonIter-16 100000 13783 ns/op 945.74 MB/s 14678 B/op 385 allocs/op
BenchmarkEncoder_Generic_Sonic-16 100000 25547 ns/op 510.23 MB/s 13762 B/op 4 allocs/op
BenchmarkEncoder_Generic_JsonIter-16 100000 44526 ns/op 292.75 MB/s 13433 B/op 77 allocs/op
BenchmarkEncoder_Generic_StdLib-16 100000 134480 ns/op 96.93 MB/s 48177 B/op 827 allocs/op
BenchmarkEncoder_Binding_Sonic-16 100000 6658 ns/op 1957.74 MB/s 14156 B/op 4 allocs/op
BenchmarkEncoder_Binding_JsonIter-16 100000 21367 ns/op 610.05 MB/s 9487 B/op 2 allocs/op
BenchmarkEncoder_Binding_StdLib-16 100000 17558 ns/op 742.41 MB/s 9480 B/op 1 allocs/op
BenchmarkEncoder_Parallel_Generic_Sonic-16 100000 4562 ns/op 2857.18 MB/s 10957 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Generic_JsonIter-16 100000 10943 ns/op 1191.21 MB/s 13449 B/op 77 allocs/op
BenchmarkEncoder_Parallel_Generic_StdLib-16 100000 52174 ns/op 249.84 MB/s 48218 B/op 827 allocs/op
BenchmarkEncoder_Parallel_Binding_Sonic-16 100000 1422 ns/op 9168.12 MB/s 11030 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Binding_JsonIter-16 100000 4630 ns/op 2815.35 MB/s 9496 B/op 2 allocs/op
BenchmarkEncoder_Parallel_Binding_StdLib-16 100000 4977 ns/op 2619.08 MB/s 9488 B/op 1 allocs/op
BenchmarkDecoder_Generic_Sonic-16 100000 57247 ns/op 227.70 MB/s 49727 B/op 313 allocs/op
BenchmarkDecoder_Generic_StdLib-16 100000 139698 ns/op 93.31 MB/s 50898 B/op 772 allocs/op
BenchmarkDecoder_Generic_JsonIter-16 100000 101967 ns/op 127.84 MB/s 55787 B/op 1068 allocs/op
BenchmarkDecoder_Binding_Sonic-16 100000 28254 ns/op 461.35 MB/s 25062 B/op 34 allocs/op
BenchmarkDecoder_Binding_StdLib-16 100000 123779 ns/op 105.31 MB/s 10560 B/op 207 allocs/op
BenchmarkDecoder_Binding_JsonIter-16 100000 38253 ns/op 340.75 MB/s 14674 B/op 385 allocs/op
BenchmarkDecoder_Parallel_Generic_Sonic-16 100000 10171 ns/op 1281.59 MB/s 49458 B/op 313 allocs/op
BenchmarkDecoder_Parallel_Generic_StdLib-16 100000 54916 ns/op 237.36 MB/s 50907 B/op 772 allocs/op
BenchmarkDecoder_Parallel_Generic_JsonIter-16 100000 48286 ns/op 269.95 MB/s 55811 B/op 1068 allocs/op
BenchmarkDecoder_Parallel_Binding_Sonic-16 100000 5282 ns/op 2467.83 MB/s 24683 B/op 34 allocs/op
BenchmarkDecoder_Parallel_Binding_StdLib-16 100000 31875 ns/op 408.94 MB/s 10559 B/op 207 allocs/op
BenchmarkDecoder_Parallel_Binding_JsonIter-16 100000 13810 ns/op 943.90 MB/s 14679 B/op 385 allocs/op
BenchmarkSearchOne_Gjson-16 100000 8992 ns/op 1448.28 MB/s 0 B/op 0 allocs/op
BenchmarkSearchOne_Jsoniter-16 100000 58313 ns/op 223.33 MB/s 27936 B/op 647 allocs/op
Expand Down
3 changes: 3 additions & 0 deletions ast/encode_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,9 @@ import (
)

func TestGC_Encode(t *testing.T) {
if debugSyncGC {
return
}
root, err := NewSearcher(_TwitterJson).GetByPath()
if err != nil {
t.Fatal(err)
Expand Down
22 changes: 17 additions & 5 deletions ast/parser_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
package ast

import (
`os`
`encoding/json`
`testing`
`runtime`
Expand All @@ -28,19 +29,30 @@ import (
`github.com/tidwall/gjson`
)

var (
debugSyncGC = os.Getenv("SONIC_SYNC_GC") != ""
debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == ""
)

func TestMain(m *testing.M) {
go func () {
if !debugAsyncGC {
return
}
println("Begin GC looping...")
for {
runtime.GC()
debug.FreeOSMemory()
}
println("stop GC looping!")
for {
runtime.GC()
debug.FreeOSMemory()
}
println("stop GC looping!")
}()
m.Run()
}

func TestGC_Parse(t *testing.T) {
if debugSyncGC {
return
}
_, _, err := Loads(_TwitterJson)
if err != nil {
t.Fatal(err)
Expand Down
3 changes: 3 additions & 0 deletions ast/search_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ import (


func TestGC_Search(t *testing.T) {
if debugSyncGC {
return
}
_, err := NewSearcher(_TwitterJson).GetByPath("statuses", 0, "id")
if err != nil {
t.Fatal(err)
Expand Down
2 changes: 2 additions & 0 deletions bench.sh
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#!/usr/bin/env bash

pwd=$(pwd)
export SONIC_NO_ASYNC_GC=1

cd $pwd/encoder
go test -benchmem -run=^$ -benchtime=100000x -bench "^(BenchmarkEncoder_Generic_Sonic|BenchmarkEncoder_Generic_StdLib|BenchmarkEncoder_Generic_JsonIter|BenchmarkEncoder_Generic_GoJson|BenchmarkEncoder_Binding_Sonic|BenchmarkEncoder_Binding_StdLib|BenchmarkEncoder_Binding_JsonIter|BenchmarkEncoder_Binding_GoJson|BenchmarkEncoder_Parallel_Generic_Sonic|BenchmarkEncoder_Parallel_Generic_StdLib|BenchmarkEncoder_Parallel_Generic_JsonIter|BenchmarkEncoder_Parallel_Generic_GoJson|BenchmarkEncoder_Parallel_Binding_Sonic|BenchmarkEncoder_Parallel_Binding_StdLib|BenchmarkEncoder_Parallel_Binding_JsonIter|BenchmarkEncoder_Parallel_Binding_GoJson)$"
Expand All @@ -15,4 +16,5 @@ go test -benchmem -run=^$ -benchtime=100000x -bench '^(BenchmarkEncodeRaw|Benchm

go test -benchmem -run=^$ -benchtime=10000000x -bench "^(BenchmarkNodeGetByPath|BenchmarkStructGetByPath_Jsoniter|BenchmarkNodeIndex|BenchmarkStructIndex|BenchmarkSliceIndex|BenchmarkMapIndex|BenchmarkNodeGet|BenchmarkSliceGet|BenchmarkMapGet|BenchmarkNodeSet|BenchmarkMapSet|BenchmarkNodeSetByIndex|BenchmarkSliceSetByIndex|BenchmarkStructSetByIndex|BenchmarkNodeUnset|BenchmarkMapUnset|BenchmarkNodUnsetByIndex|BenchmarkSliceUnsetByIndex|BenchmarkNodeAdd|BenchmarkSliceAdd|BenchmarkMapAdd)$"

unset SONIC_NO_ASYNC_GC
cd $pwd
100 changes: 81 additions & 19 deletions decoder/assembler_amd64.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,17 @@ import (
`fmt`
`math`
`reflect`
`strconv`
`unsafe`

`github.com/bytedance/sonic/internal/caching`
`github.com/bytedance/sonic/internal/cpu`
`github.com/bytedance/sonic/internal/jit`
`github.com/bytedance/sonic/internal/native`
`github.com/bytedance/sonic/internal/native/types`
`github.com/bytedance/sonic/internal/rt`
`github.com/twitchyliquid64/golang-asm/obj`
`github.com/twitchyliquid64/golang-asm/obj/x86`
)

/** Register Allocations
Expand Down Expand Up @@ -67,7 +69,7 @@ const (
_FP_args = 72 // 72 bytes to pass arguments and return values for this function
_FP_fargs = 80 // 80 bytes for passing arguments to other Go functions
_FP_saves = 40 // 40 bytes for saving the registers before CALL instructions
_FP_locals = 96 // 96 bytes for local variables
_FP_locals = 96 // 96 bytes for local variables
)

const (
Expand Down Expand Up @@ -133,9 +135,10 @@ var (
)

var (
_DF = jit.Reg("R10") // reuse R10 in generic decoder for flags
_ET = jit.Reg("R10")
_EP = jit.Reg("R11")
_R10 = jit.Reg("R10") // used for gcWriteBarrier
_DF = jit.Reg("R10") // reuse R10 in generic decoder for flags
_ET = jit.Reg("R10")
_EP = jit.Reg("R11")
)

var (
Expand Down Expand Up @@ -536,7 +539,7 @@ func (self *_Assembler) vfollow(vt reflect.Type) {
self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
self.valloc(vt, _AX) // VALLOC ${vt}, AX
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
self.WritePtrAX(1, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
self.Link("_end_{n}") // _end_{n}:
self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
}
Expand Down Expand Up @@ -664,7 +667,7 @@ func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr) {
self.malloc(_SI, _DX) // MALLOC SI, DX
self.Emit("MOVQ" , p, _DI) // MOVQ ${p}, DI
self.Emit("MOVQ" , n, _SI) // MOVQ ${n}, SI
self.Emit("MOVQ" , _DX, p) // MOVQ DX, ${p}
self.WriteRecNotAX(2, _DX, p, true, true) // MOVQ DX, ${p}
self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
self.Emit("XORL" , _R8, _R8) // XORL R8, R8
self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv
Expand Down Expand Up @@ -696,7 +699,7 @@ func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr) {
self.malloc(_SI, _DX) // MALLOC SI, DX
self.Emit("MOVQ" , p, _DI) // MOVQ ${p}, DI
self.Emit("MOVQ" , n, _SI) // MOVQ ${n}, SI
self.Emit("MOVQ" , _DX, p) // MOVQ DX, ${p}
self.WriteRecNotAX(6, _DX, p, true, true) // MOVQ DX, ${p}
self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX
Expand Down Expand Up @@ -866,7 +869,7 @@ func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool)
self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n}
self.valloc(t.Elem(), _AX) // VALLOC ${t.Elem()}, AX
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
self.WritePtrAX(3, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
self.Link("_deref_{n}") // _deref_{n}:
}

Expand Down Expand Up @@ -1029,7 +1032,11 @@ func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
/* call the decoder */
self.Emit("XORL" , _DX, _DX) // XORL DX, DX
self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI
self.Emit("XCHGQ", _SI, jit.Ptr(_VP, 0)) // XCHGQ SI, (VP)

self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R9) // MOVQ (VP), R9
self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP)
self.Emit("MOVQ" , _R9, _SI)

self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP)
self.call(_F_b64decode) // CALL b64decode
self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
Expand Down Expand Up @@ -1066,7 +1073,7 @@ func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
func (self *_Assembler) _asm_OP_num(_ *_Instr) {
self.parse_number() // PARSE NUMBER
self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0
self.Emit("MOVQ", _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
self.WriteRecNotAX(5, _DI, jit.Ptr(_VP, 0), false, false) // MOVQ DI, (VP)
self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
}

Expand Down Expand Up @@ -1183,7 +1190,7 @@ func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
self.call_go(_F_makemap_small) // CALL_GO makemap_small
self.Emit("MOVQ" , jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
self.Link("_end_{n}") // _end_{n}:
self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
}
Expand Down Expand Up @@ -1316,7 +1323,7 @@ func (self *_Assembler) _asm_OP_slice_init(p *_Instr) {
self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP)
self.call_go(_F_makeslice) // CALL_GO makeslice
self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
self.Link("_done_{n}") // _done_{n}:
self.Emit("XORL" , _AX, _AX) // XORL AX, AX
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
Expand All @@ -1338,7 +1345,7 @@ func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI
self.Emit("MOVQ" , jit.Ptr(_SP, 48), _AX) // MOVQ 48(SP), AX
self.Emit("MOVQ" , jit.Ptr(_SP, 56), _SI) // MOVQ 56(SP), SI
self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true)// MOVQ DI, (VP)
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
self.Link("_index_{n}") // _index_{n}:
Expand Down Expand Up @@ -1502,12 +1509,12 @@ func (self *_Assembler) _asm_OP_load(_ *_Instr) {
}

func (self *_Assembler) _asm_OP_save(_ *_Instr) {
self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
self.Emit("CMPQ", _AX, jit.Imm(_MaxStack)) // CMPQ AX, ${_MaxStack}
self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
self.Emit("CMPQ", _CX, jit.Imm(_MaxStack)) // CMPQ CX, ${_MaxStack}
self.Sjmp("JA" , _LB_stack_error) // JA _stack_error
self.Emit("MOVQ", _VP, jit.Sib(_ST, _AX, 1, 8)) // MOVQ VP, 8(ST)(AX)
self.Emit("ADDQ", jit.Imm(8), _AX) // ADDQ $8, AX
self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX)
self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX
self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST)
}

func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
Expand Down Expand Up @@ -1559,3 +1566,58 @@ func (self *_Assembler) _asm_OP_switch(p *_Instr) {
self.Link("_default_{n}")
self.NOP()
}

var (
_V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier))))

_F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
)

// WritePtrAX emits code that stores the pointer currently held in AX into the
// heap slot addressed by rec, honoring the Go GC write barrier.
//
// At runtime the emitted code checks the runtime.writeBarrier flag: when the
// barrier is enabled it calls gcWriteBarrierAX (which expects the destination
// slot address in DI and the pointer value in AX); otherwise it performs a
// plain MOVQ AX, rec.
//
// Parameters:
//   i      - per-call-site index used to make the generated jump labels unique
//            within the current instruction ("{n}" scope).
//   rec    - destination memory operand receiving the pointer.
//   saveDI - when true, DI is spilled/reloaded around the barrier call because
//            the caller still needs its value (LEAQ rec, DI clobbers it).
//
// NOTE(review): R10 is used as scratch for both the flag address and the
// barrier function pointer; callers must not rely on R10 surviving this.
func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
self.Emit("MOVQ", _V_writeBarrier, _R10)
self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
if saveDI {
self.save(_DI)
}
self.Emit("LEAQ", rec, _DI)
self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, R10
self.Rjmp("CALL", _R10)
if saveDI {
self.load(_DI)
}
self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
self.Emit("MOVQ", _AX, rec)
self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
}

// WriteRecNotAX emits code that stores the pointer held in register/operand
// ptr into the heap slot addressed by rec, honoring the Go GC write barrier.
// It is the counterpart of WritePtrAX for pointers NOT already in AX: the
// barrier helper (gcWriteBarrierAX) takes the value in AX, so this routine
// moves ptr into AX first.
//
// Parameters:
//   i      - per-call-site index used to make the generated jump labels unique
//            within the current instruction ("{n}" scope).
//   ptr    - operand holding the pointer value to store; must not involve AX.
//   rec    - destination memory operand receiving the pointer.
//   saveDI - when true, DI is spilled/reloaded around the barrier call because
//            the caller still needs it (LEAQ rec, DI clobbers it).
//   saveAX - when true, AX's current value must survive: XCHGQ swaps ptr and
//            AX before the call and swaps them back afterwards.
//
// Panics at assembly time (not at runtime) if rec uses AX as base or index,
// since AX is clobbered here.
func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) {
if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
panic("rec contains AX!")
}
self.Emit("MOVQ", _V_writeBarrier, _R10)
self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
if saveAX {
self.Emit("XCHGQ", ptr, _AX)
} else {
self.Emit("MOVQ", ptr, _AX)
}
if saveDI {
self.save(_DI)
}
self.Emit("LEAQ", rec, _DI)
self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, R10
self.Rjmp("CALL", _R10)
if saveDI {
self.load(_DI)
}
if saveAX {
self.Emit("XCHGQ", ptr, _AX)
}
self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
self.Emit("MOVQ", ptr, rec)
self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
}
Loading

0 comments on commit 43e4a00

Please sign in to comment.