-
Notifications
You must be signed in to change notification settings - Fork 128
/
Copy pathmain.go
1278 lines (1115 loc) · 44.9 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (c) 2018-2019, The Decred developers
// Copyright (c) 2017, Jonathan Chappelow
// See LICENSE for details.
package main
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"sync"
"time"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/rpcclient/v5"
"github.com/decred/dcrdata/blockdata/v5"
"github.com/decred/dcrdata/db/cache/v3"
"github.com/decred/dcrdata/db/dbtypes/v2"
"github.com/decred/dcrdata/db/dcrpg/v5"
"github.com/decred/dcrdata/exchanges/v2"
"github.com/decred/dcrdata/gov/v3/agendas"
"github.com/decred/dcrdata/gov/v3/politeia"
"github.com/decred/dcrdata/mempool/v5"
m "github.com/decred/dcrdata/middleware/v3"
pstypes "github.com/decred/dcrdata/pubsub/types/v3"
"github.com/decred/dcrdata/pubsub/v4"
"github.com/decred/dcrdata/rpcutils/v3"
"github.com/decred/dcrdata/semver"
"github.com/decred/dcrdata/stakedb/v3"
"github.com/decred/dcrdata/v5/api"
"github.com/decred/dcrdata/v5/api/insight"
"github.com/decred/dcrdata/v5/explorer"
notify "github.com/decred/dcrdata/v5/notification"
"github.com/decred/dcrdata/v5/version"
"github.com/dmigwi/go-piparser/proposals"
"github.com/go-chi/chi"
"github.com/google/gops/agent"
)
// main is the process entry point. All real work lives in _main so that
// deferred cleanup runs before the process exits (deferred functions do
// not execute after os.Exit). A non-nil error from _main yields exit
// status 1; success yields status 0.
func main() {
	// Derive a context that is cancelled when a shutdown request arrives
	// via requestShutdown.
	ctx := withShutdownCancel(context.Background())

	// Watch for OS interrupt signals and internal shutdown requests.
	go shutdownListener()

	err := _main(ctx)
	if err == nil {
		os.Exit(0)
	}
	// Only log if the logging subsystem was initialized before the failure.
	if logRotator != nil {
		log.Error(err)
	}
	os.Exit(1)
}
// _main does all the work. Deferred functions do not run after os.Exit(), so
// main wraps this function, which returns a code.
func _main(ctx context.Context) error {
// Parse the configuration file, and setup logger.
cfg, err := loadConfig()
if err != nil {
fmt.Printf("Failed to load dcrdata config: %s\n", err.Error())
return err
}
defer func() {
if logRotator != nil {
logRotator.Close()
}
}()
if cfg.CPUProfile != "" {
var f *os.File
f, err = os.Create(cfg.CPUProfile)
if err != nil {
return err
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
if cfg.UseGops {
// Start gops diagnostic agent, with shutdown cleanup.
if err = agent.Listen(agent.Options{}); err != nil {
return err
}
defer agent.Close()
}
// Display app version.
log.Infof("%s version %v (Go version %s)", version.AppName,
version.Version(), runtime.Version())
// Grab a Notifier. After all databases are synced, register handlers with
// the Register*Group methods, set the best block height with
// SetPreviousBlock and start receiving notifications with Listen. Create
// the notifier now so the *rpcclient.NotificationHandlers can be obtained,
// using (*Notifier).DcrdHandlers, for the rpcclient.Client constructor.
notifier := notify.NewNotifier(ctx)
// Connect to dcrd RPC server using a websocket.
dcrdClient, nodeVer, err := connectNodeRPC(cfg, notifier.DcrdHandlers())
if err != nil || dcrdClient == nil {
return fmt.Errorf("Connection to dcrd failed: %v", err)
}
defer func() {
if dcrdClient != nil {
log.Infof("Closing connection to dcrd.")
dcrdClient.Shutdown()
dcrdClient.WaitForShutdown()
}
log.Infof("Bye!")
time.Sleep(250 * time.Millisecond)
}()
// Display connected network (e.g. mainnet, testnet, simnet).
curnet, err := dcrdClient.GetCurrentNet()
if err != nil {
return fmt.Errorf("Unable to get current network from dcrd: %v", err)
}
log.Infof("Connected to dcrd (JSON-RPC API v%s) on %v",
nodeVer.String(), curnet.String())
if curnet != activeNet.Net {
log.Criticalf("Network of connected node, %s, does not match expected "+
"network, %s.", activeNet.Net, curnet)
return fmt.Errorf("expected network %s, got %s", activeNet.Net, curnet)
}
// StakeDatabase
stakeDB, stakeDBHeight, err := stakedb.NewStakeDatabase(dcrdClient, activeChain, cfg.DataDir)
if err != nil {
log.Errorf("Unable to create stake DB: %v", err)
if stakeDBHeight >= 0 {
log.Infof("Attempting to recover stake DB...")
stakeDB, err = stakedb.LoadAndRecover(dcrdClient, activeChain, cfg.DataDir, stakeDBHeight-288)
stakeDBHeight = int64(stakeDB.Height())
}
if err != nil {
if stakeDB != nil {
_ = stakeDB.Close()
}
return fmt.Errorf("StakeDatabase recovery failed: %v", err)
}
}
defer stakeDB.Close()
log.Infof("Loaded StakeDatabase at height %d", stakeDBHeight)
var piParser dcrpg.ProposalsFetcher
if !cfg.DisablePiParser {
log.Infof("Setting up the Politeia's proposals clone repository. Please wait...")
// If repoName and repoOwner are set to empty strings the defaults are used.
parser, err := proposals.NewParser(cfg.PiPropRepoOwner, cfg.PiPropRepoName, cfg.DataDir)
if err != nil {
// since this piparser isn't a requirement to run the explorer, its
// failure should not block the system from running.
log.Error(err)
}
if parser != nil {
piParser = parser
}
}
// Auxiliary DB (PostgreSQL)
var newPGIndexes, updateAllAddresses bool
pgHost, pgPort := cfg.PGHost, ""
if !strings.HasPrefix(pgHost, "/") {
pgHost, pgPort, err = net.SplitHostPort(cfg.PGHost)
if err != nil {
return fmt.Errorf("SplitHostPort failed: %v", err)
}
}
dbi := dcrpg.DBInfo{
Host: pgHost,
Port: pgPort,
User: cfg.PGUser,
Pass: cfg.PGPass,
DBName: cfg.PGDBName,
QueryTimeout: cfg.PGQueryTimeout,
}
// If using {netname} then replace it with netName(activeNet).
dbi.DBName = strings.Replace(dbi.DBName, "{netname}", netName(activeNet), -1)
// Rough estimate of capacity in rows, using size of struct plus some
// for the string buffer of the Address field.
rowCap := cfg.AddrCacheCap / int(32+reflect.TypeOf(dbtypes.AddressRowCompact{}).Size())
log.Infof("Address cache capacity: %d addresses: ~%.0f MiB tx data (%d items) + %.0f MiB UTXOs",
cfg.AddrCacheLimit, float64(cfg.AddrCacheCap)/1024/1024, rowCap, float64(cfg.AddrCacheUXTOCap)/1024/1024)
// Open and upgrade the database.
dbCfg := dcrpg.ChainDBCfg{
DBi: &dbi,
Params: activeChain,
DevPrefetch: !cfg.NoDevPrefetch,
HidePGConfig: cfg.HidePGConfig,
AddrCacheAddrCap: cfg.AddrCacheLimit,
AddrCacheRowCap: rowCap,
AddrCacheUTXOByteCap: cfg.AddrCacheUXTOCap,
}
mpChecker := rpcutils.NewMempoolAddressChecker(dcrdClient, activeChain)
chainDB, err := dcrpg.NewChainDBWithCancel(ctx, &dbCfg,
stakeDB, mpChecker, piParser, dcrdClient, requestShutdown)
if chainDB != nil {
defer chainDB.Close()
}
if err != nil {
return err
}
if cfg.DropIndexes {
log.Info("Dropping all table indexing and quitting...")
err = chainDB.DeindexAll()
requestShutdown()
return err
}
// Check for missing indexes.
missingIndexes, descs, err := chainDB.MissingIndexes()
if err != nil {
return err
}
// If any indexes are missing, forcibly drop any existing indexes, and
// create them all after block sync.
if len(missingIndexes) > 0 {
newPGIndexes = true
updateAllAddresses = true
// Warn if this is not a fresh sync.
if chainDB.Height() > 0 {
log.Warnf("Some table indexes not found!")
for im, mi := range missingIndexes {
log.Warnf(` - Missing Index "%s": "%s"`, mi, descs[im])
}
log.Warnf("Forcing new index creation and addresses table spending info update.")
}
}
// Heights gets the current height of each DB, the minimum of the DB heights
// (dbHeight), and the chain server height.
Heights := func() (nodeHeight, chainDBHeight int64, err error) {
_, nodeHeight, err = dcrdClient.GetBestBlock()
if err != nil {
err = fmt.Errorf("unable to get block from node: %v", err)
return
}
chainDBHeight, err = chainDB.HeightDB()
if err != nil {
log.Errorf("chainDB.HeightDB failed: %v", err)
return
}
if chainDBHeight == -1 {
log.Infof("chainDB block summary table is empty.")
}
log.Debugf("chainDB height: %d", chainDBHeight)
return
}
// Check for database tip blocks that have been orphaned. If any are found,
// purge blocks to get to a common ancestor. Only message when purging more
// than requested in the configuration settings.
blocksToPurge := cfg.PurgeNBestBlocks
_, auxHeight, err := Heights()
if err != nil {
return fmt.Errorf("Failed to get Heights for tip check: %v", err)
}
if auxHeight > -1 {
orphaned, err := rpcutils.OrphanedTipLength(ctx, dcrdClient, auxHeight, chainDB.BlockHash)
if err != nil {
return fmt.Errorf("Failed to compare tip blocks for the aux DB: %v", err)
}
if int(orphaned) > blocksToPurge {
blocksToPurge = int(orphaned)
log.Infof("Orphaned tip detected on aux DB. Purging %d blocks", blocksToPurge)
}
}
// Give a chance to abort a purge.
if shutdownRequested(ctx) {
return nil
}
if blocksToPurge > 0 {
// The number of blocks to purge for each DB is computed so that the DBs
// will end on the same height.
_, chainDBHeight, err := Heights()
if err != nil {
return fmt.Errorf("Heights failed: %v", err)
}
// Determine the largest DB height.
maxHeight := chainDBHeight
// The final best block after purge.
purgeToBlock := maxHeight - int64(blocksToPurge)
// Purge NAux blocks from auxiliary DB.
NAux := chainDBHeight - purgeToBlock
log.Infof("Purging PostgreSQL data for the %d best blocks...", NAux)
s, heightDB, err := chainDB.PurgeBestBlocks(NAux)
if err != nil {
return fmt.Errorf("Failed to purge %d blocks from PostgreSQL: %v",
NAux, err)
}
if s != nil {
log.Infof("Successfully purged data for %d blocks from PostgreSQL "+
"(new height = %d):\n%v", s.Blocks, heightDB, s)
} // otherwise likely sql.ErrNoRows (heightDB was already -1)
}
// Get the last block added to the aux DB.
lastBlockPG, err := chainDB.HeightDB()
if err != nil {
return fmt.Errorf("Unable to get height from PostgreSQL DB: %v", err)
}
// For consistency with StakeDatabase, a non-negative height is needed.
heightDB := lastBlockPG
if heightDB < 0 {
heightDB = 0
}
charts := cache.NewChartData(ctx, uint32(heightDB), activeChain)
chainDB.RegisterCharts(charts)
// Aux DB height and stakedb height must be equal. StakeDatabase will
// catch up automatically if it is behind, but we must rewind it here if
// it is ahead of chainDB. For chainDB to receive notification from
// StakeDatabase when the required blocks are connected, the
// StakeDatabase must be at the same height or lower than chainDB.
stakeDBHeight = int64(stakeDB.Height())
if stakeDBHeight > heightDB {
// Have chainDB rewind it's the StakeDatabase. stakeDBHeight is
// always rewound to a height of zero even when lastBlockPG is -1,
// hence we rewind to heightDB.
log.Infof("Rewinding StakeDatabase from block %d to %d.",
stakeDBHeight, heightDB)
stakeDBHeight, err = chainDB.RewindStakeDB(ctx, heightDB)
if err != nil {
return fmt.Errorf("RewindStakeDB failed: %v", err)
}
// Verify that the StakeDatabase is at the intended height.
if stakeDBHeight != heightDB {
return fmt.Errorf("failed to rewind stakedb: got %d, expecting %d",
stakeDBHeight, heightDB)
}
}
// TODO: just use getblockchaininfo to see if it still syncing and what
// height the network's best block is at.
blockHash, nodeHeight, err := dcrdClient.GetBestBlock()
if err != nil {
return fmt.Errorf("Unable to get block from node: %v", err)
}
block, err := dcrdClient.GetBlockHeader(blockHash)
if err != nil {
return fmt.Errorf("unable to fetch the block from the node: %v", err)
}
// bestBlockAge is the time since the dcrd best block was mined.
bestBlockAge := time.Since(block.Timestamp).Minutes()
// Since mining a block take approximately ChainParams.TargetTimePerBlock then the
// expected height of the best block from dcrd now should be this.
expectedHeight := int64(bestBlockAge/float64(activeChain.TargetTimePerBlock)) + nodeHeight
// Estimate how far chainDB is behind the node.
blocksBehind := expectedHeight - lastBlockPG
if blocksBehind < 0 {
return fmt.Errorf("Node is still syncing. Node height = %d, "+
"DB height = %d", expectedHeight, heightDB)
}
// PG gets winning tickets out of baseDB's pool info cache, so it must
// be big enough to hold the needed blocks' info, and charged with the
// data from disk. The cache is updated on each block connect.
tpcSize := int(blocksBehind) + 200
log.Debugf("Setting ticket pool cache capacity to %d blocks", tpcSize)
err = stakeDB.SetPoolCacheCapacity(tpcSize)
if err != nil {
return err
}
// Charge stakedb pool info cache, including previous PG blocks.
if err = chainDB.ChargePoolInfoCache(heightDB - 2); err != nil {
return fmt.Errorf("Failed to charge pool info cache: %v", err)
}
// Block data collector. Needs a StakeDatabase too.
collector := blockdata.NewCollector(dcrdClient, activeChain, stakeDB)
if collector == nil {
return fmt.Errorf("Failed to create block data collector")
}
// Build a slice of each required saver type for each data source.
blockDataSavers := []blockdata.BlockDataSaver{chainDB}
mempoolSavers := []mempool.MempoolDataSaver{chainDB.MPC} // mempool.MempoolDataCache
// Allow Ctrl-C to halt startup here.
if shutdownRequested(ctx) {
return nil
}
// WaitGroup for monitoring goroutines
var wg sync.WaitGroup
// ExchangeBot
var xcBot *exchanges.ExchangeBot
if cfg.EnableExchangeBot && activeChain.Name != "mainnet" {
log.Warnf("disabling exchange monitoring. only available on mainnet")
cfg.EnableExchangeBot = false
}
if cfg.EnableExchangeBot {
botCfg := exchanges.ExchangeBotConfig{
BtcIndex: cfg.ExchangeCurrency,
MasterBot: cfg.RateMaster,
MasterCertFile: cfg.RateCertificate,
}
if cfg.DisabledExchanges != "" {
botCfg.Disabled = strings.Split(cfg.DisabledExchanges, ",")
}
xcBot, err = exchanges.NewExchangeBot(&botCfg)
if err != nil {
log.Errorf("Could not create exchange monitor. Exchange info will be disabled: %v", err)
} else {
var xcList, prepend string
for k := range xcBot.Exchanges {
xcList += prepend + k
prepend = ", "
}
log.Infof("ExchangeBot monitoring %s", xcList)
wg.Add(1)
go xcBot.Start(ctx, &wg)
}
}
// Creates a new or loads an existing agendas db instance that helps to
// store and retrieves agendas data. Agendas votes are On-Chain
// transactions that appear in the decred blockchain. If corrupted data is
// is found, its deleted pending the data update that restores valid data.
var agendaDB *agendas.AgendaDB
agendaDB, err = agendas.NewAgendasDB(
dcrdClient, filepath.Join(cfg.DataDir, cfg.AgendasDBFileName))
if err != nil {
return fmt.Errorf("failed to create new agendas db instance: %v", err)
}
// Creates a new or loads an existing proposals db instance that helps to
// store and retrieve proposals data. Proposals votes is Off-Chain
// data stored in github repositories away from the decred blockchain. It also
// creates a new http client needed to query Politeia API endpoints.
// When piparser is disabled, disable the API calls too.
var proposalsInstance explorer.PoliteiaBackend
if !cfg.DisablePiParser {
proposalsInstance, err = politeia.NewProposalsDB(cfg.PoliteiaAPIURL,
filepath.Join(cfg.DataDir, cfg.ProposalsFileName))
if err != nil {
return fmt.Errorf("failed to create new proposals db instance: %v", err)
}
} else {
log.Info("Piparser is disabled. Proposals API has been disabled too")
}
// A vote tracker tracks current block and stake versions and votes. Only
// initialize the vote tracker if not on simnet. nil tracker is a sentinel
// value throughout.
var tracker *agendas.VoteTracker
if !cfg.SimNet {
tracker, err = agendas.NewVoteTracker(activeChain, dcrdClient,
chainDB.AgendaVoteCounts)
if err != nil {
return fmt.Errorf("Unable to initialize vote tracker: %v", err)
}
}
// Create the explorer system.
explore := explorer.New(&explorer.ExplorerConfig{
DataSource: chainDB,
UseRealIP: cfg.UseRealIP,
AppVersion: version.Version(),
DevPrefetch: !cfg.NoDevPrefetch,
Viewsfolder: "views",
XcBot: xcBot,
AgendasSource: agendaDB,
Tracker: tracker,
ProposalsSource: proposalsInstance,
PoliteiaURL: cfg.PoliteiaAPIURL,
MainnetLink: cfg.MainnetLink,
TestnetLink: cfg.TestnetLink,
ReloadHTML: cfg.ReloadHTML,
OnionAddress: cfg.OnionAddress,
})
// TODO: allow views config
if explore == nil {
return fmt.Errorf("failed to create new explorer (templates missing?)")
}
explore.UseSIGToReloadTemplates()
defer explore.StopWebsocketHub()
// Create the pub sub hub.
psHub, err := pubsub.NewPubSubHub(chainDB)
if err != nil {
return fmt.Errorf("failed to create new pubsubhub: %v", err)
}
defer psHub.StopWebsocketHub()
blockDataSavers = append(blockDataSavers, psHub)
mempoolSavers = append(mempoolSavers, psHub) // individual transactions are from mempool monitor
// Store explorerUI data after pubsubhub.
blockDataSavers = append(blockDataSavers, explore)
mempoolSavers = append(mempoolSavers, explore)
// Create the mempool data collector.
mpoolCollector := mempool.NewMempoolDataCollector(dcrdClient, activeChain)
if mpoolCollector == nil {
// Shutdown goroutines.
requestShutdown()
return fmt.Errorf("Failed to create mempool data collector")
}
// The MempoolMonitor receives notifications of new transactions on
// notify.NtfnChans.NewTxChan, and of new blocks on the same channel with a
// nil transaction message. The mempool monitor will process the
// transactions, and forward new ones on via the mpDataToPSHub with an
// appropriate signal to the underlying WebSocketHub on signalToPSHub.
signalToPSHub := psHub.HubRelay()
signalToExplorer := explore.MempoolSignal()
mempoolSigOuts := []chan<- pstypes.HubMessage{signalToPSHub, signalToExplorer}
mpm, err := mempool.NewMempoolMonitor(ctx, mpoolCollector, mempoolSavers,
activeChain, dcrdClient, mempoolSigOuts, true)
// Ensure the initial collect/store succeeded.
if err != nil {
// Shutdown goroutines.
requestShutdown()
return fmt.Errorf("NewMempoolMonitor: %v", err)
}
// Use the MempoolMonitor in aux DB to get unconfirmed transaction data.
chainDB.UseMempoolChecker(mpm)
// Prepare for sync by setting up the channels for status/progress updates
// (barLoad) or full explorer page updates (latestBlockHash).
// barLoad is used to send sync status updates to websocket clients (e.g.
// browsers with the status page opened) via the goroutines launched by
// BeginSyncStatusUpdates.
var barLoad chan *dbtypes.ProgressBarLoad
// latestBlockHash communicates the hash of block most recently processed
// during synchronization. This is done if all of the explorer pages (not
// just the status page) are to be served during sync.
var latestBlockHash chan *chainhash.Hash
// Display the blockchain syncing status page if the number of blocks behind
// the node's best block height are more than the set limit. The sync status
// page should also be displayed when updateAllAddresses and newPGIndexes
// are true, indicating maintenance or an initial sync.
nodeHeight, chainDBHeight, err := Heights()
if err != nil {
return fmt.Errorf("Heights failed: %v", err)
}
blocksBehind = nodeHeight - chainDBHeight
log.Debugf("dbHeight: %d / blocksBehind: %d", chainDBHeight, blocksBehind)
displaySyncStatusPage := blocksBehind > int64(cfg.SyncStatusLimit) || // over limit
updateAllAddresses || newPGIndexes // maintenance or initial sync
// Initiate the sync status monitor and the coordinating goroutines if the
// sync status is activated, otherwise coordinate updating the full set of
// explorer pages.
if displaySyncStatusPage {
// Start goroutines that keep the update the shared progress bar data,
// and signal the websocket hub to send progress updates to clients.
barLoad = make(chan *dbtypes.ProgressBarLoad, 2)
explore.BeginSyncStatusUpdates(barLoad)
} else {
// Start a goroutine to update the explorer pages when the DB sync
// functions send a new block hash on the following channel.
latestBlockHash = make(chan *chainhash.Hash, 32)
// The BlockConnected handler should not be started until after sync.
go func() {
// Keep receiving updates until the channel is closed, or a nil Hash
// pointer received.
for hash := range latestBlockHash {
if hash == nil {
log.Debugf("main is now done updating explorer pages with latest block hash.")
return
}
// Fetch the blockdata by block hash.
d, msgBlock, err := collector.CollectHash(hash)
if err != nil {
log.Warnf("failed to fetch blockdata for (%s) hash. error: %v",
hash.String(), err)
continue
}
// Store the blockdata for the explorer pages.
if err = explore.Store(d, msgBlock); err != nil {
log.Warnf("failed to store (%s) hash's blockdata for the explorer pages error: %v",
hash.String(), err)
}
}
}()
// Before starting the DB sync, trigger the explorer to display data for
// the current best block.
// Retrieve the hash of the best block across every DB.
latestDBBlockHash, err := dcrdClient.GetBlockHash(chainDBHeight)
if err != nil {
return fmt.Errorf("failed to fetch the block at height (%d): %v",
chainDBHeight, err)
}
// Signal to load this block's data into the explorer. Future signals
// will come from the sync methods of either baseDB or chainDB.
latestBlockHash <- latestDBBlockHash
}
// Create the Insight socket.io server, and add it to block savers if in
// full/pg mode. Since insightSocketServer is added into the url before even
// the sync starts, this implementation cannot be moved to
// initiateHandlersAndCollectBlocks function.
insightSocketServer, err := insight.NewSocketServer(activeChain, dcrdClient)
if err != nil {
return fmt.Errorf("Could not create Insight socket.io server: %v", err)
}
defer insightSocketServer.Close()
blockDataSavers = append(blockDataSavers, insightSocketServer)
// Start dcrdata's JSON web API.
app := api.NewContext(&api.AppContextConfig{
Client: dcrdClient,
Params: activeChain,
DataSource: chainDB,
XcBot: xcBot,
AgendasDBInstance: agendaDB,
MaxAddrs: cfg.MaxCSVAddrs,
Charts: charts,
IsPiparserDisabled: cfg.DisablePiParser,
})
// Start the notification hander for keeping /status up-to-date.
wg.Add(1)
go app.StatusNtfnHandler(ctx, &wg, chainDB.UpdateChan())
// Initial setting of DBHeight. Subsequently, Store() will send this.
if chainDBHeight >= 0 {
// Do not sent 4294967295 = uint32(-1) if there are no blocks.
chainDB.SignalHeight(uint32(chainDBHeight))
}
// Configure the URL path to http handler router for the API.
apiMux := api.NewAPIRouter(app, cfg.IndentJSON, cfg.UseRealIP, cfg.CompressAPI)
// File downloads piggy-back on the API.
fileMux := api.NewFileRouter(app, cfg.UseRealIP)
// Configure the explorer web pages router.
webMux := chi.NewRouter()
if cfg.ServerHeader != "" {
log.Debugf("Using Server HTTP response header %q", cfg.ServerHeader)
webMux.Use(m.Server(cfg.ServerHeader))
}
webMux.With(explore.SyncStatusPageIntercept).Group(func(r chi.Router) {
r.Get("/", explore.Home)
r.Get("/visualblocks", explore.VisualBlocks)
})
webMux.Get("/ws", explore.RootWebsocket)
webMux.Get("/ps", psHub.WebSocketHandler)
// Make the static assets available under a path with the given prefix.
mountAssetPaths := func(pathPrefix string) {
if !strings.HasSuffix(pathPrefix, "/") {
pathPrefix += "/"
}
webMux.Get(pathPrefix+"favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./public/images/favicon/favicon.ico")
})
cacheControlMaxAge := int64(cfg.CacheControlMaxAge)
FileServer(webMux, pathPrefix+"js", "./public/js", cacheControlMaxAge)
FileServer(webMux, pathPrefix+"css", "./public/css", cacheControlMaxAge)
FileServer(webMux, pathPrefix+"fonts", "./public/fonts", cacheControlMaxAge)
FileServer(webMux, pathPrefix+"images", "./public/images", cacheControlMaxAge)
FileServer(webMux, pathPrefix+"dist", "./public/dist", cacheControlMaxAge)
}
// Mount under root (e.g. /js, /css, etc.).
mountAssetPaths("/")
// HTTP profiler
if cfg.HTTPProfile {
profPath := cfg.HTTPProfPath
log.Warnf("Starting the HTTP profiler on path %s.", profPath)
// http pprof uses http.DefaultServeMux
http.Handle("/", http.RedirectHandler(profPath+"/debug/pprof/", http.StatusSeeOther))
webMux.Mount(profPath, http.StripPrefix(profPath, http.DefaultServeMux))
}
// SyncStatusAPIIntercept returns a json response if the sync status page is
// enabled (no the full explorer while syncing).
webMux.With(explore.SyncStatusAPIIntercept).Group(func(r chi.Router) {
// Mount the dcrdata's REST API.
r.Mount("/api", apiMux.Mux)
// Setup and mount the Insight API.
insightApp := insight.NewInsightApi(dcrdClient, chainDB,
activeChain, mpm, cfg.IndentJSON, app.Status)
insightApp.SetReqRateLimit(cfg.InsightReqRateLimit)
insightMux := insight.NewInsightApiRouter(insightApp, cfg.UseRealIP,
cfg.CompressAPI, cfg.MaxCSVAddrs)
r.Mount("/insight/api", insightMux.Mux)
if insightSocketServer != nil {
r.With(m.NoOrigin).Get("/insight/socket.io/", insightSocketServer.ServeHTTP)
}
})
// HTTP Error 503 StatusServiceUnavailable for file requests before sync.
webMux.With(explore.SyncStatusFileIntercept).Group(func(r chi.Router) {
r.Mount("/download", fileMux.Mux)
})
webMux.With(explore.SyncStatusPageIntercept).Group(func(r chi.Router) {
r.NotFound(explore.NotFound)
r.Mount("/explorer", explore.Mux)
r.Get("/days", explore.DayBlocksListing)
r.Get("/weeks", explore.WeekBlocksListing)
r.Get("/months", explore.MonthBlocksListing)
r.Get("/years", explore.YearBlocksListing)
r.Get("/blocks", explore.Blocks)
r.Get("/ticketpricewindows", explore.StakeDiffWindows)
r.Get("/side", explore.SideChains)
r.Get("/rejects", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/disapproved", http.StatusPermanentRedirect)
})
r.Get("/disapproved", explore.DisapprovedBlocks)
r.Get("/mempool", explore.Mempool)
r.Get("/parameters", explore.ParametersPage)
r.With(explore.BlockHashPathOrIndexCtx).Get("/block/{blockhash}", explore.Block)
r.With(explorer.TransactionHashCtx).Get("/tx/{txid}", explore.TxPage)
r.With(explorer.TransactionHashCtx, explorer.TransactionIoIndexCtx).Get("/tx/{txid}/{inout}/{inoutid}", explore.TxPage)
r.With(explorer.AddressPathCtx).Get("/address/{address}", explore.AddressPage)
r.With(explorer.AddressPathCtx).Get("/addresstable/{address}", explore.AddressTable)
r.Get("/agendas", explore.AgendasPage)
r.With(explorer.AgendaPathCtx).Get("/agenda/{agendaid}", explore.AgendaPage)
r.Get("/proposals", explore.ProposalsPage)
r.With(explorer.ProposalPathCtx).Get("/proposal/{proposalrefid}", explore.ProposalPage)
r.Get("/decodetx", explore.DecodeTxPage)
r.Get("/search", explore.Search)
r.Get("/charts", explore.Charts)
r.Get("/ticketpool", explore.Ticketpool)
r.Get("/stats", explore.StatsPage)
r.Get("/market", explore.MarketPage)
r.Get("/statistics", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/stats", http.StatusPermanentRedirect)
})
// MenuFormParser will typically redirect, but going to the homepage as a
// fallback.
r.With(explorer.MenuFormParser).Post("/set", explore.Home)
r.Get("/attack-cost", explore.AttackCost)
})
// Configure a page for the bare "/insight" path. This mounts the static
// assets under /insight (e.g. /insight/js) to support the page's complete
// loading when the root mounter is not accessible, such as the case in
// certain reverse proxy configurations that map /insight as the root path.
webMux.With(m.OriginalRequestURI).Get("/insight", explore.InsightRootPage)
// Serve static assets under /insight for when the a reverse proxy prefixes
// all requests with "/insight". (e.g. /insight/js, /insight/css, etc.).
mountAssetPaths("/insight")
// Start the web server.
listenAndServeProto(ctx, &wg, cfg.APIListen, cfg.APIProto, webMux)
// Last chance to quit before syncing if the web server could not start.
if shutdownRequested(ctx) {
return nil
}
log.Infof("Starting blockchain sync...")
explore.SetDBsSyncing(true)
psHub.SetReady(false)
// Coordinate the sync of both sqlite and auxiliary DBs with the network.
// This closure captures the RPC client and the quit channel.
// sqlite has been dropped, and this can probably be simplified.
// getSyncd runs one full synchronization pass of the ChainDB (PostgreSQL)
// against the network and returns the resulting DB block height. The flags
// request a full address-table update and (re)creation of the PG indexes,
// respectively. It captures the RPC client, context, and progress-bar
// loader from the surrounding scope.
getSyncd := func(updateAddys, newPGInds bool) (int64, error) {
	// The asynchronous ChainDB sync delivers its outcome on this channel.
	syncRes := make(chan dbtypes.SyncResult)

	// Fetch blocks with the plain RPC client, or wrap it in a
	// rpcutils.BlockPrefetchClient unless prefetching is disabled.
	var fetcher rpcutils.BlockFetcher = dcrdClient
	if !cfg.NoBlockPrefetch {
		prefetcher := rpcutils.NewBlockPrefetchClient(dcrdClient)
		// Stop the prefetcher and report its effectiveness when this
		// sync pass returns.
		defer func() {
			prefetcher.Stop()
			log.Debugf("Block prefetcher hits = %d, misses = %d.",
				prefetcher.Hits(), prefetcher.Misses())
		}()
		fetcher = prefetcher
	}

	// The BlockGate coordinates block hand-off between the DB consumers.
	// For a detailed description of how the DBs' synchronization is
	// coordinated, see the documents in db/dcrpg/sync.go.
	gate := rpcutils.NewBlockGate(fetcher, 4)

	// Launch the chainDB sync, the master block getter, which retrieves
	// blocks and makes them available to the other consumers.
	go chainDB.SyncChainDBAsync(ctx, syncRes, gate, updateAddys,
		newPGInds, latestBlockHash, barLoad)

	// Block until the sync completes, fails, or shutdown is requested.
	return waitForSync(ctx, syncRes)
}
// Run the initial (possibly lengthy) sync pass with the address-update and
// index-creation flags computed earlier during startup.
chainDBHeight, err = getSyncd(updateAllAddresses, newPGIndexes)
if err != nil {
requestShutdown()
return err
}
// After sync and indexing, must use upsert statement, which checks for
// duplicate entries and updates instead of erroring. SyncChainDB should
// set this on successful sync, but do it again anyway.
chainDB.EnableDuplicateCheckOnInsert(true)
// The sync routines may have lengthy tasks, such as table indexing, that
// follow main sync loop. Before enabling the chain monitors, again ensure
// the DBs are at the node's best block.
// ensureSync repeats catch-up sync passes until the DB height matches the
// node's best block height, then records that height in the app status.
// Passes run through this closure never redo the full address update or
// index (re)creation.
ensureSync := func() error {
	// Any passes from here on are plain catch-up runs.
	newPGIndexes, updateAllAddresses = false, false
	for {
		_, nodeHeight, err := dcrdClient.GetBestBlock()
		if err != nil {
			return fmt.Errorf("unable to get block from node: %v", err)
		}
		if chainDBHeight >= nodeHeight {
			// Caught up with the node; publish the height.
			app.Status.SetHeight(uint32(nodeHeight))
			return nil
		}
		// Still behind the node's best block; run another sync pass.
		chainDBHeight, err = getSyncd(updateAllAddresses, newPGIndexes)
		if err != nil {
			requestShutdown()
			return err
		}
	}
}
if err = ensureSync(); err != nil {
return err
}
// Exit immediately after the sync completes if SyncAndQuit is set, since
// all that was needed was a successful blockchain sync.
if cfg.SyncAndQuit {
log.Infof("All ready, at height %d. Quitting.", chainDBHeight)
return nil
}
log.Info("Mainchain sync complete.")
// Ensure all side chains known by dcrd are also present in the auxiliary DB
// and import them if they are not already there.
if cfg.ImportSideChains {
// First identify the side chain blocks that are missing from the DB.
log.Info("Aux DB -> Retrieving side chain blocks from dcrd...")
sideChainBlocksToStore, nSideChainBlocks, err := chainDB.MissingSideChainBlocks()
if err != nil {
return fmt.Errorf("Aux DB -> Unable to determine missing side chain blocks: %v", err)
}
nSideChains := len(sideChainBlocksToStore)
// Importing side chain blocks involves only the aux (postgres) DBs
// since stakedb only supports mainchain. TODO: Get stakedb to work with
// side chain blocks to get ticket pool info.
// Collect and store data for each side chain.
log.Infof("Aux DB -> Importing %d new block(s) from %d known side chains...",
nSideChainBlocks, nSideChains)
// Disable recomputing project fund balance, and clearing address
// balance and counts cache.
chainDB.InBatchSync = true
var sideChainsStored, sideChainBlocksStored int
for _, sideChain := range sideChainBlocksToStore {
// Process this side chain only if there are blocks in it that need
// to be stored.
if len(sideChain.Hashes) == 0 {
continue
}
sideChainsStored++
// Collect and store data for each block in this side chain.
for _, hash := range sideChain.Hashes {
// Validate the block hash. A bad hash is logged and skipped rather
// than aborting the whole import.
blockHash, err := chainhash.NewHashFromStr(hash)
if err != nil {
log.Errorf("Aux DB -> Invalid block hash %s: %v.", hash, err)
continue
}
// Collect block data from the node.
_, msgBlock, err := collector.CollectHash(blockHash)
if err != nil {
// Do not quit if unable to collect side chain block data.
log.Errorf("Aux DB -> Unable to collect data for side chain block %s: %v.",
hash, err)
continue
}
// Get the chainwork for this block from the node.
chainWork, err := rpcutils.GetChainWork(chainDB.Client, blockHash)
if err != nil {
log.Errorf("GetChainWork failed (%s): %v", blockHash, err)
continue
}
// PostgreSQL / aux DB
log.Debugf("Aux DB -> Importing block %s (height %d) into aux DB.",
blockHash, msgBlock.Header.Height)
// Stake invalidation is always handled by subsequent block, so
// add the block as valid. These are all side chain blocks.
isValid, isMainchain := true, false
// Existing DB records might be for mainchain and/or valid
// blocks, so these imported blocks should not overwrite data in
// rows that would conflict as per the different table constraints
// and unique indexes.
updateExistingRecords := false
// Store data in the aux (dcrpg) DB.
_, _, _, err = chainDB.StoreBlock(msgBlock, isValid, isMainchain,
updateExistingRecords, true, true, chainWork)
if err != nil {
// If data collection succeeded, but storage fails, bail out
// to diagnose the DB trouble.
return fmt.Errorf("Aux DB -> ChainDB.StoreBlock failed: %v", err)
}
sideChainBlocksStored++
}
}
chainDB.InBatchSync = false
log.Infof("Successfully added %d blocks from %d side chains into dcrpg DB.",
sideChainBlocksStored, sideChainsStored)
// That may have taken a while, check again for new blocks from network.
if err = ensureSync(); err != nil {
return err
}
}
log.Infof("All ready, at height %d.", chainDBHeight)
// Sync is done: let the explorer serve full pages and the pubsub hub
// accept subscribers.
explore.SetDBsSyncing(false)
psHub.SetReady(true)
// Pre-populate charts data using the dumped cache data in the .gob file path
// provided instead of querying the data from the dbs.
// Should be invoked before explore.Store to avoid double charts data
// cache population. This charts pre-population is faster than db querying
// and can be done before the monitors are fully set up.
dumpPath := filepath.Join(cfg.DataDir, cfg.ChartsCacheDump)
if err = charts.Load(dumpPath); err != nil {
// A missing/stale cache file is non-fatal; charts will be rebuilt
// from the DB instead.
log.Warnf("Failed to load charts data cache: %v", err)
}
// Add charts saver method after explorer and database stores. This may run
// asynchronously.
blockDataSavers = append(blockDataSavers, blockdata.BlockTrigger{