forked from okx/xlayer-node
-
Notifications
You must be signed in to change notification settings - Fork 0
/
endpoints_debug.go
312 lines (272 loc) · 11 KB
/
endpoints_debug.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
package jsonrpc
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"sort"
"sync"
"time"
"github.com/0xPolygonHermez/zkevm-node/jsonrpc/types"
"github.com/0xPolygonHermez/zkevm-node/log"
"github.com/0xPolygonHermez/zkevm-node/state"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/jackc/pgx/v4"
)
// defaultTraceConfig is used by buildTraceTransaction whenever the caller
// passes a nil *traceConfig: storage/stack capture enabled, memory and
// return-data capture disabled, no custom tracer.
var defaultTraceConfig = &traceConfig{
	DisableStorage:   false,
	DisableStack:     false,
	EnableMemory:     false,
	EnableReturnData: false,
	Tracer:           nil,
}
// DebugEndpoints is the debug jsonrpc endpoint
type DebugEndpoints struct {
	cfg      Config                   // jsonrpc server configuration (ReadTimeout is used to bound batch tracing)
	state    types.StateInterface     // access to L2 state: blocks, batches, txs, receipts, debug traces
	etherman types.EthermanInterface  // L1 access, used when resolving symbolic block/batch numbers
	txMan    DBTxManager              // wraps each endpoint call in a DB tx scope
}
// NewDebugEndpoints returns DebugEndpoints
func NewDebugEndpoints(cfg Config, state types.StateInterface, etherman types.EthermanInterface) *DebugEndpoints {
	endpoints := &DebugEndpoints{}
	endpoints.cfg = cfg
	endpoints.state = state
	endpoints.etherman = etherman
	return endpoints
}
// traceConfig mirrors the geth debug tracer options accepted by the
// debug_trace* endpoints; zero value means "capture everything the
// struct-logger captures by default" (see defaultTraceConfig).
type traceConfig struct {
	DisableStorage   bool            `json:"disableStorage"`
	DisableStack     bool            `json:"disableStack"`
	EnableMemory     bool            `json:"enableMemory"`
	EnableReturnData bool            `json:"enableReturnData"`
	Tracer           *string         `json:"tracer"`
	// TracerConfig is passed through to the state layer undecoded.
	TracerConfig     json.RawMessage `json:"tracerConfig"`
}
// traceBlockTransactionResponse is one element of the array returned by
// debug_traceBlockByNumber / debug_traceBlockByHash.
type traceBlockTransactionResponse struct {
	Result interface{} `json:"result"`
}
// traceBatchTransactionResponse is one element of the array returned by
// debug_traceBatchByNumber; unlike the block variant it also carries the
// tx hash so callers can match traces to transactions.
type traceBatchTransactionResponse struct {
	TxHash common.Hash `json:"txHash"`
	Result interface{} `json:"result"`
}
// TraceTransaction creates a response for debug_traceTransaction request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtracetransaction
func (d *DebugEndpoints) TraceTransaction(hash types.ArgHash, cfg *traceConfig) (interface{}, types.Error) {
	// run the trace inside a DB tx scope so state reads are consistent
	trace := func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		return d.buildTraceTransaction(ctx, hash.Hash(), cfg, dbTx)
	}
	return d.txMan.NewDbTxScope(d.state, trace)
}
// TraceBlockByNumber creates a response for debug_traceBlockByNumber request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtraceblockbynumber
func (d *DebugEndpoints) TraceBlockByNumber(number types.BlockNumber, cfg *traceConfig) (interface{}, types.Error) {
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		// resolve symbolic numbers (latest, pending, ...) to a concrete block number
		blockNumber, rpcErr := number.GetNumericBlockNumber(ctx, d.state, d.etherman, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}
		block, err := d.state.GetL2BlockByNumber(ctx, blockNumber, dbTx)
		switch {
		case errors.Is(err, state.ErrNotFound):
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block #%d not found", blockNumber))
		case err != nil:
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by number", err, true)
		}
		// trace every tx in the block, preserving tx order
		traces, rpcErr := d.buildTraceBlock(ctx, block.Transactions(), cfg, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}
		return traces, nil
	})
}
// TraceBlockByHash creates a response for debug_traceBlockByHash request.
// See https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-debug#debugtraceblockbyhash
func (d *DebugEndpoints) TraceBlockByHash(hash types.ArgHash, cfg *traceConfig) (interface{}, types.Error) {
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		block, err := d.state.GetL2BlockByHash(ctx, hash.Hash(), dbTx)
		switch {
		case errors.Is(err, state.ErrNotFound):
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block %s not found", hash.Hash().String()))
		case err != nil:
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash", err, true)
		}
		// trace every tx in the block, preserving tx order
		traces, rpcErr := d.buildTraceBlock(ctx, block.Transactions(), cfg, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}
		return traces, nil
	})
}
// TraceBatchByNumber creates a response for debug_traceBatchByNumber request.
// this endpoint tries to help clients to get traces at once for all the transactions
// attached to the same batch.
//
// IMPORTANT: in order to take advantage of the infrastructure automatically scaling,
// instead of parallelizing the trace transaction internally and pushing all the load
// to a single jRPC and Executor instance, the code will redirect the trace transaction
// requests to the same url, making them external calls, so we can process in parallel
// with multiple jRPC and Executor instances.
//
// the request flow will work as follows:
// -> user do a trace batch request
// -> jRPC balancer picks a jRPC server to handle the trace batch request
// -> picked jRPC sends parallel trace transaction requests for each transaction in the batch
// -> jRPC balancer sends each request to a different jRPC to handle the trace transaction requests
// -> picked jRPC server group trace transaction responses from other jRPC servers
// -> picked jRPC respond the initial request to the user with all the tx traces
func (d *DebugEndpoints) TraceBatchByNumber(httpRequest *http.Request, number types.BatchNumber, cfg *traceConfig) (interface{}, types.Error) {
	// traceResponse carries the trace (or error) for a single tx plus the
	// keys needed to sort the final result deterministically.
	type traceResponse struct {
		blockNumber uint64
		txIndex     uint64
		txHash      common.Hash
		trace       interface{}
		err         error
	}
	// bufferSize bounds how many txs are traced in parallel
	// (it is both the channel capacity and the worker-pool size).
	const bufferSize = 10
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		// resolve symbolic batch numbers (latest, ...) to a concrete value
		batchNumber, rpcErr := number.GetNumericBatchNumber(ctx, d.state, d.etherman, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}
		batch, err := d.state.GetBatchByNumber(ctx, batchNumber, dbTx)
		if errors.Is(err, state.ErrNotFound) {
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("batch #%d not found", batchNumber))
		} else if err != nil {
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get batch by number", err, true)
		}
		// ErrNotFound here just means the batch has no txs, which is fine
		txs, _, err := d.state.GetTransactionsByBatchNumber(ctx, batch.BatchNumber, dbTx)
		if !errors.Is(err, state.ErrNotFound) && err != nil {
			return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v to create the traces", batchNumber), err, true)
		}
		// load each receipt to learn the tx's block number and index,
		// which are needed to order the response
		receipts := make([]ethTypes.Receipt, 0, len(txs))
		for _, tx := range txs {
			receipt, err := d.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx)
			if err != nil {
				return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v to get trace", tx.Hash().String()), err, true)
			}
			receipts = append(receipts, *receipt)
		}
		requests := make(chan ethTypes.Receipt, bufferSize)
		mu := &sync.Mutex{}
		wg := sync.WaitGroup{}
		wg.Add(len(receipts))
		responses := make([]traceResponse, 0, len(receipts))
		// gets the trace for one tx and adds it to the responses
		loadTraceByTxHash := func(receipt ethTypes.Receipt) {
			defer wg.Done()
			response := traceResponse{
				blockNumber: receipt.BlockNumber.Uint64(),
				txIndex:     uint64(receipt.TransactionIndex),
				txHash:      receipt.TxHash,
			}
			trace, err := d.TraceTransaction(types.ArgHash(receipt.TxHash), cfg)
			if err != nil {
				err := fmt.Errorf("failed to get tx trace for tx %v, err: %w", receipt.TxHash.String(), err)
				log.Errorf(err.Error())
				response.err = err
			} else {
				response.trace = trace
			}
			// add to the responses
			mu.Lock()
			defer mu.Unlock()
			responses = append(responses, response)
		}
		// start a bounded pool of workers so that at most bufferSize traces
		// run concurrently. (The previous version spawned one goroutine per
		// receipt from a single dispatcher, which made bufferSize meaningless
		// as a parallelism limit.)
		for i := 0; i < bufferSize; i++ {
			go func() {
				for receipt := range requests {
					loadTraceByTxHash(receipt)
				}
			}()
		}
		// feed the workers, then close the channel so every worker terminates
		// even when we bail out on timeout below. (The previous version only
		// closed the channel on the success path, leaking the dispatcher
		// goroutine whenever the timeout fired.)
		for _, receipt := range receipts {
			requests <- receipt
		}
		close(requests)
		// wait the traces to be loaded
		if waitTimeout(&wg, d.cfg.ReadTimeout.Duration) {
			return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: timeout reached", batchNumber), nil, true)
		}
		// since the txs are attached to a L2 Block and the L2 Block is
		// the struct attached to the Batch, in order to always respond
		// the traces in the same order, we need to order the transactions
		// first by block number and then by tx index. (The previous comparator
		// compared tx index first, contradicting this comment and interleaving
		// txs from different blocks.)
		sort.Slice(responses, func(i, j int) bool {
			if responses[i].blockNumber != responses[j].blockNumber {
				return responses[i].blockNumber < responses[j].blockNumber
			}
			return responses[i].txIndex < responses[j].txIndex
		})
		// build the batch trace response array; any single tx failure fails
		// the whole request
		traces := make([]traceBatchTransactionResponse, 0, len(receipts))
		for _, response := range responses {
			if response.err != nil {
				return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: failed to get trace for tx: %v, err: %v", batchNumber, response.txHash.String(), response.err.Error()), nil, true)
			}
			traces = append(traces, traceBatchTransactionResponse{
				TxHash: response.txHash,
				Result: response.trace,
			})
		}
		return traces, nil
	})
}
// buildTraceBlock traces every tx in txs sequentially, in order, and wraps
// each result in a traceBlockTransactionResponse. A failure on any single
// tx aborts the whole block trace.
func (d *DebugEndpoints) buildTraceBlock(ctx context.Context, txs []*ethTypes.Transaction, cfg *traceConfig, dbTx pgx.Tx) (interface{}, types.Error) {
	results := make([]traceBlockTransactionResponse, 0, len(txs))
	for _, tx := range txs {
		txTrace, traceErr := d.buildTraceTransaction(ctx, tx.Hash(), cfg, dbTx)
		if traceErr != nil {
			msg := fmt.Sprintf("failed to get trace for transaction %v: %v", tx.Hash().String(), traceErr.Error())
			return RPCErrorResponse(types.DefaultErrorCode, msg, traceErr, true)
		}
		results = append(results, traceBlockTransactionResponse{Result: txTrace})
	}
	return results, nil
}
// buildTraceTransaction asks the state layer to debug-trace the tx identified
// by hash, translating a nil cfg into defaultTraceConfig and mapping state
// errors to RPC errors.
func (d *DebugEndpoints) buildTraceTransaction(ctx context.Context, hash common.Hash, cfg *traceConfig, dbTx pgx.Tx) (interface{}, types.Error) {
	if cfg == nil {
		cfg = defaultTraceConfig
	}
	result, err := d.state.DebugTransaction(ctx, hash, state.TraceConfig{
		DisableStack:     cfg.DisableStack,
		DisableStorage:   cfg.DisableStorage,
		EnableMemory:     cfg.EnableMemory,
		EnableReturnData: cfg.EnableReturnData,
		Tracer:           cfg.Tracer,
		TracerConfig:     cfg.TracerConfig,
	}, dbTx)
	switch {
	case errors.Is(err, state.ErrNotFound):
		return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil, false)
	case err != nil:
		return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("failed to get trace: %v", err.Error()))
	}
	return result.TraceResult, nil
}
// waitTimeout waits for the waitGroup for the specified max timeout.
// Returns true if waiting timed out.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false // completed normally
case <-time.After(timeout):
return true // timed out
}
}