node.cc
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuFT, Tokutek Fractal Tree Indexing Library.
Copyright (C) 2007-2013 Tokutek, Inc.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
granted to you under this License shall terminate as of the date
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
under this License.
*/
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "ft/ft.h"
#include "ft/ft-internal.h"
#include "ft/serialize/ft_node-serialize.h"
#include "ft/node.h"
#include "ft/serialize/rbuf.h"
#include "ft/serialize/wbuf.h"
#include "util/scoped_malloc.h"
#include "util/sort.h"
// Effect: Fill in N as an empty ftnode.
// TODO: Rename toku_ftnode_create
void toku_initialize_empty_ftnode(FTNODE n, BLOCKNUM blocknum, int height, int num_children, int layout_version, unsigned int flags) {
paranoid_invariant(layout_version != 0);
paranoid_invariant(height >= 0);
n->max_msn_applied_to_node_on_disk = ZERO_MSN; // correct value for root node, harmless for others
n->flags = flags;
n->blocknum = blocknum;
n->layout_version = layout_version;
n->layout_version_original = layout_version;
n->layout_version_read_from_disk = layout_version;
n->height = height;
n->pivotkeys.create_empty();
n->bp = 0;
n->n_children = num_children;
n->oldest_referenced_xid_known = TXNID_NONE;
if (num_children > 0) {
XMALLOC_N(num_children, n->bp);
for (int i = 0; i < num_children; i++) {
BP_BLOCKNUM(n,i).b=0;
BP_STATE(n,i) = PT_INVALID;
BP_WORKDONE(n,i) = 0;
BP_INIT_TOUCHED_CLOCK(n, i);
set_BNULL(n,i);
if (height > 0) {
set_BNC(n, i, toku_create_empty_nl());
} else {
set_BLB(n, i, toku_create_empty_bn());
}
}
}
n->dirty = 1; // special case exception, it's okay to mark as dirty because the basements are empty
toku_ft_status_note_ftnode(height, true);
}
// destroys the internals of the ftnode, but it does not free the values
// that are stored
// this is common functionality for toku_ftnode_free and rebalance_ftnode_leaf
// MUST NOT do anything besides free the structures that have been allocated
void toku_destroy_ftnode_internals(FTNODE node) {
node->pivotkeys.destroy();
for (int i = 0; i < node->n_children; i++) {
if (BP_STATE(node,i) == PT_AVAIL) {
if (node->height > 0) {
destroy_nonleaf_childinfo(BNC(node,i));
} else {
destroy_basement_node(BLB(node, i));
}
} else if (BP_STATE(node,i) == PT_COMPRESSED) {
SUB_BLOCK sb = BSB(node,i);
toku_free(sb->compressed_ptr);
toku_free(sb);
} else {
paranoid_invariant(is_BNULL(node, i));
}
set_BNULL(node, i);
}
toku_free(node->bp);
node->bp = NULL;
}
/* Frees a node, including all the stuff in the hash table. */
void toku_ftnode_free(FTNODE *nodep) {
FTNODE node = *nodep;
toku_ft_status_note_ftnode(node->height, false);
toku_destroy_ftnode_internals(node);
toku_free(node);
*nodep = nullptr;
}
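// Effect: Fold the stat deltas accumulated in this node's basement nodes into
// the tree's on-disk stats (and, when for_checkpoint is set, into the
// checkpoint header's stats as well). The per-basement deltas are cleared as
// they are captured.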
void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint) {
STAT64INFO_S deltas = ZEROSTATS;
// capture deltas before rebalancing basements for serialization
deltas = toku_get_and_clear_basement_stats(ftnode);
// locking not necessary here with respect to checkpointing
// in Clayface (because of the pending lock and cachetable lock
// in toku_cachetable_begin_checkpoint)
// essentially, if we are dealing with a for_checkpoint
// parameter in a function that is called by the flush_callback,
// then the cachetable needs to ensure that this is called in a safe
// manner that does not interfere with the beginning
// of a checkpoint, which it does with the cachetable lock
// and pending lock
toku_ft_update_stats(&ft->h->on_disk_stats, deltas);
if (for_checkpoint) {
toku_ft_update_stats(&ft->checkpoint_header->on_disk_stats, deltas);
}
}
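// Effect: Copy each partition's blocknum, state, and workdone from node into
// cloned_node, cloning the basement nodes (leaf) or nonleaf child buffers
// (internal) themselves.
// Requires: every partition of node is PT_AVAIL.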
void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) {
for (int i = 0; i < node->n_children; i++) {
BP_BLOCKNUM(cloned_node,i) = BP_BLOCKNUM(node,i);
paranoid_invariant(BP_STATE(node,i) == PT_AVAIL);
BP_STATE(cloned_node,i) = PT_AVAIL;
BP_WORKDONE(cloned_node, i) = BP_WORKDONE(node, i);
if (node->height == 0) {
set_BLB(cloned_node, i, toku_clone_bn(BLB(node,i)));
} else {
set_BNC(cloned_node, i, toku_clone_nl(BNC(node,i)));
}
}
}
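// Effect: Evict the basement node at childnum from memory: subtract its stat
// delta from the tree's in-memory stats, destroy it, and mark the partition
// PT_ON_DISK.
// Requires: the node is clean, so nothing unsaved is lost by the eviction.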
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) {
// free the basement node
assert(!node->dirty);
BASEMENTNODE bn = BLB(node, childnum);
toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
destroy_basement_node(bn);
set_BNULL(node, childnum);
BP_STATE(node, childnum) = PT_ON_DISK;
}
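// Effect: Detach and return the basement node at childnum without destroying
// it, leaving the partition marked PT_ON_DISK. Ownership passes to the caller.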
BASEMENTNODE toku_detach_bn(FTNODE node, int childnum) {
assert(BP_STATE(node, childnum) == PT_AVAIL);
BASEMENTNODE bn = BLB(node, childnum);
set_BNULL(node, childnum);
BP_STATE(node, childnum) = PT_ON_DISK;
return bn;
}
//
// Orthopush
//
struct store_msg_buffer_offset_extra {
int32_t *offsets;
int i;
};
int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra) __attribute__((nonnull(3)));
int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra)
{
extra->offsets[extra->i] = offset;
extra->i++;
return 0;
}
/**
* Given pointers to offsets within a message buffer where we can find messages,
* figure out the MSN of each message, and compare those MSNs. Returns 1,
* 0, or -1 if a is larger than, equal to, or smaller than b.
*/
int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo);
int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo)
{
MSN amsn, bmsn;
msg_buffer.get_message_key_msn(ao, nullptr, &amsn);
msg_buffer.get_message_key_msn(bo, nullptr, &bmsn);
if (amsn.msn > bmsn.msn) {
return +1;
}
if (amsn.msn < bmsn.msn) {
return -1;
}
return 0;
}
/**
* Given a message buffer and an offset, apply the message with toku_ft_bn_apply_msg, or discard it,
* based on its MSN and the MSN of the basement node.
*/
static void
do_bn_apply_msg(FT_HANDLE ft_handle, BASEMENTNODE bn, message_buffer *msg_buffer, int32_t offset,
txn_gc_info *gc_info, uint64_t *workdone, STAT64INFO stats_to_update) {
DBT k, v;
ft_msg msg = msg_buffer->get_message(offset, &k, &v);
// The messages are being iterated over in (key,msn) order or just in
// msn order, so all the messages for one key, from one buffer, are in
// ascending msn order. So it's ok that we don't update the basement
// node's msn until the end.
if (msg.msn().msn > bn->max_msn_applied.msn) {
toku_ft_bn_apply_msg(
ft_handle->ft->cmp,
ft_handle->ft->update_fun,
bn,
msg,
gc_info,
workdone,
stats_to_update
);
} else {
toku_ft_status_note_msn_discard();
}
// We must always mark the message as stale since it has been marked
// (using omt::iterate_and_mark_range)
// It is possible to call do_bn_apply_msg even when it won't apply the message because
// the node containing it could have been evicted and brought back in.
msg_buffer->set_freshness(offset, false);
}
struct iterate_do_bn_apply_msg_extra {
FT_HANDLE t;
BASEMENTNODE bn;
NONLEAF_CHILDINFO bnc;
txn_gc_info *gc_info;
uint64_t *workdone;
STAT64INFO stats_to_update;
};
int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) __attribute__((nonnull(3)));
int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e)
{
do_bn_apply_msg(e->t, e->bn, &e->bnc->msg_buffer, offset, e->gc_info, e->workdone, e->stats_to_update);
return 0;
}
/**
* Given the bounds of the basement node to which we will apply messages,
* find the indexes within message_tree which contain the range of
* relevant messages.
*
* The message tree contains offsets into the buffer, where messages are
* found. The pivot_bounds are the lower bound exclusive and upper bound
* inclusive, because they come from pivot keys in the tree. We want OMT
* indices, which must have the lower bound be inclusive and the upper
* bound exclusive. We will get these by telling omt::find to look
* for something strictly bigger than each of our pivot bounds.
*
* Outputs the OMT indices in lbi (lower bound inclusive) and ube (upper
* bound exclusive).
*/
template<typename find_bounds_omt_t>
static void
find_bounds_within_message_tree(
const toku::comparator &cmp,
const find_bounds_omt_t &message_tree, /// tree holding message buffer offsets, in which we want to look for indices
message_buffer *msg_buffer, /// message buffer in which messages are found
const pivot_bounds &bounds, /// key bounds within the basement node we're applying messages to
uint32_t *lbi, /// (output) "lower bound inclusive" (index into message_tree)
uint32_t *ube /// (output) "upper bound exclusive" (index into message_tree)
)
{
int r = 0;
if (!toku_dbt_is_empty(bounds.lbe())) {
// By setting msn to MAX_MSN and by using direction of +1, we will
// get the first message greater than (in (key, msn) order) any
// message (with any msn) with the key lower_bound_exclusive.
// This will be a message we want to try applying, so it is the
// "lower bound inclusive" within the message_tree.
struct toku_msg_buffer_key_msn_heaviside_extra lbi_extra(cmp, msg_buffer, bounds.lbe(), MAX_MSN);
int32_t found_lb;
r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(lbi_extra, +1, &found_lb, lbi);
if (r == DB_NOTFOUND) {
// There is no relevant data (the lower bound is bigger than
// any message in this tree), so we have no range and we're
// done.
*lbi = 0;
*ube = 0;
return;
}
if (!toku_dbt_is_empty(bounds.ubi())) {
// Check if what we found for lbi is greater than the upper
// bound inclusive that we have. If so, there are no relevant
// messages between these bounds.
const DBT *ubi = bounds.ubi();
const int32_t offset = found_lb;
DBT found_lbidbt;
msg_buffer->get_message_key_msn(offset, &found_lbidbt, nullptr);
int c = cmp(&found_lbidbt, ubi);
// These DBTs really are both inclusive bounds, so we need
// strict inequality in order to determine that there's
// nothing between them. If they're equal, then we actually
// need to apply the message pointed to by lbi, and also
// anything with the same key but a bigger msn.
if (c > 0) {
*lbi = 0;
*ube = 0;
return;
}
}
} else {
// No lower bound given, it's negative infinity, so we start at
// the first message in the OMT.
*lbi = 0;
}
if (!toku_dbt_is_empty(bounds.ubi())) {
// Again, we use an msn of MAX_MSN and a direction of +1 to get
// the first thing bigger than the upper_bound_inclusive key.
// This is therefore the smallest thing we don't want to apply,
// and omt::iterate_on_range will not examine it.
struct toku_msg_buffer_key_msn_heaviside_extra ube_extra(cmp, msg_buffer, bounds.ubi(), MAX_MSN);
r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(ube_extra, +1, nullptr, ube);
if (r == DB_NOTFOUND) {
// Couldn't find anything in the buffer bigger than our key,
// so we need to look at everything up to the end of
// message_tree.
*ube = message_tree.size();
}
} else {
// No upper bound given, it's positive infinity, so we need to go
// through the end of the OMT.
*ube = message_tree.size();
}
}
/**
* For each message in the ancestor's buffer (determined by childnum) that
* is key-wise between lower_bound_exclusive and upper_bound_inclusive,
* apply the message to the basement node. We treat the bounds as minus
* or plus infinity respectively if they are NULL. Do not mark the node
* as dirty (preserve previous state of 'dirty' bit).
*/
static void
bnc_apply_messages_to_basement_node(
FT_HANDLE t, // used for comparison function
BASEMENTNODE bn, // where to apply messages
FTNODE ancestor, // the ancestor node where we can find messages to apply
int childnum, // which child buffer of ancestor contains messages we want
const pivot_bounds &bounds, // contains pivot key bounds of this basement node
txn_gc_info *gc_info,
bool* msgs_applied
)
{
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
// Determine the offsets in the message trees between which we need to
// apply messages from this buffer
STAT64INFO_S stats_delta = {0,0};
uint64_t workdone_this_ancestor = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
find_bounds_within_message_tree(t->ft->cmp, bnc->stale_message_tree, &bnc->msg_buffer, bounds, &stale_lbi, &stale_ube);
} else {
stale_lbi = 0;
stale_ube = 0;
}
uint32_t fresh_lbi, fresh_ube;
find_bounds_within_message_tree(t->ft->cmp, bnc->fresh_message_tree, &bnc->msg_buffer, bounds, &fresh_lbi, &fresh_ube);
// We now know where all the messages we must apply are, so one of the
// following 3 cases will do the application, depending on which of
// the lists contains relevant messages:
//
// 1. broadcast messages and anything else, or a mix of fresh and stale
// 2. only fresh messages
// 3. only stale messages
if (bnc->broadcast_list.size() > 0 ||
(stale_lbi != stale_ube && fresh_lbi != fresh_ube)) {
// We have messages in multiple trees, so we grab all
// the relevant messages' offsets and sort them by MSN, then apply
// them in MSN order.
const int buffer_size = ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) + bnc->broadcast_list.size());
toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t));
int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get());
struct store_msg_buffer_offset_extra sfo_extra = { .offsets = offsets, .i = 0 };
// Populate offsets array with offsets to stale messages
r = bnc->stale_message_tree.iterate_on_range<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(stale_lbi, stale_ube, &sfo_extra);
assert_zero(r);
// Then store fresh offsets, and mark them to be moved to stale later.
r = bnc->fresh_message_tree.iterate_and_mark_range<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(fresh_lbi, fresh_ube, &sfo_extra);
assert_zero(r);
// Store offsets of all broadcast messages.
r = bnc->broadcast_list.iterate<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(&sfo_extra);
assert_zero(r);
invariant(sfo_extra.i == buffer_size);
// Sort by MSN.
toku::sort<int32_t, message_buffer, msg_buffer_offset_msn_cmp>::mergesort_r(offsets, buffer_size, bnc->msg_buffer);
// Apply the messages in MSN order.
for (int i = 0; i < buffer_size; ++i) {
*msgs_applied = true;
do_bn_apply_msg(t, bn, &bnc->msg_buffer, offsets[i], gc_info, &workdone_this_ancestor, &stats_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark them to be moved to stale later.
struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta };
if (fresh_ube - fresh_lbi > 0) *msgs_applied = true;
r = bnc->fresh_message_tree.iterate_and_mark_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(fresh_lbi, fresh_ube, &iter_extra);
assert_zero(r);
} else {
invariant(fresh_lbi == fresh_ube);
// No fresh messages to apply, we just apply stale messages.
if (stale_ube - stale_lbi > 0) *msgs_applied = true;
struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta };
r = bnc->stale_message_tree.iterate_on_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(stale_lbi, stale_ube, &iter_extra);
assert_zero(r);
}
//
// update stats
//
if (workdone_this_ancestor > 0) {
(void) toku_sync_fetch_and_add(&BP_WORKDONE(ancestor, childnum), workdone_this_ancestor);
}
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
}
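// Effect: Apply to the basement node at childnum all relevant messages from
// every ancestor buffer whose max_msn_applied_to_node_on_disk exceeds the
// basement's max_msn_applied, then note that its stale ancestor messages have
// all been applied.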
static void
apply_ancestors_messages_to_bn(
FT_HANDLE t,
FTNODE node,
int childnum,
ANCESTORS ancestors,
const pivot_bounds &bounds,
txn_gc_info *gc_info,
bool* msgs_applied
)
{
BASEMENTNODE curr_bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > curr_bn->max_msn_applied.msn) {
paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL);
bnc_apply_messages_to_basement_node(
t,
curr_bn,
curr_ancestors->node,
curr_ancestors->childnum,
curr_bounds,
gc_info,
msgs_applied
);
// We don't want to check this ancestor node again if the
// next time we query it, the msn hasn't changed.
curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
}
}
// At this point, we know all the stale messages above this
// basement node have been applied, and any new messages will be
// fresh, so we don't need to look at stale messages for this
// basement node, unless it gets evicted (and this field becomes
// false when it's read in again).
curr_bn->stale_ancestor_messages_applied = true;
}
void
toku_apply_ancestors_messages_to_node (
FT_HANDLE t,
FTNODE node,
ANCESTORS ancestors,
const pivot_bounds &bounds,
bool* msgs_applied,
int child_to_read
)
// Effect:
// Bring a leaf node up-to-date according to all the messages in the ancestors.
// If the leaf node is already up-to-date then do nothing.
// If the leaf node is not already up-to-date, then record the work done
// for that leaf in each ancestor.
// Requires:
// This is being called when pinning a leaf node for the query path.
// The entire root-to-leaf path is pinned and appears in the ancestors list.
{
VERIFY_NODE(t, node);
paranoid_invariant(node->height == 0);
TXN_MANAGER txn_manager = toku_ft_get_txn_manager(t);
txn_manager_state txn_state_for_gc(txn_manager);
TXNID oldest_referenced_xid_for_simple_gc = toku_ft_get_oldest_referenced_xid_estimate(t);
txn_gc_info gc_info(&txn_state_for_gc,
oldest_referenced_xid_for_simple_gc,
node->oldest_referenced_xid_known,
true);
if (!node->dirty && child_to_read >= 0) {
paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
apply_ancestors_messages_to_bn(
t,
node,
child_to_read,
ancestors,
bounds,
&gc_info,
msgs_applied
);
}
else {
// we know this is a leaf node
// An important invariant:
// We MUST bring every available basement node for a dirty node up to date.
// flushing on the cleaner thread depends on this. This invariant
// allows the cleaner thread to just pick an internal node and flush it
// as opposed to being forced to start from the root.
for (int i = 0; i < node->n_children; i++) {
if (BP_STATE(node, i) != PT_AVAIL) { continue; }
apply_ancestors_messages_to_bn(
t,
node,
i,
ancestors,
bounds,
&gc_info,
msgs_applied
);
}
}
VERIFY_NODE(t, node);
}
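// Effect: Return true if some ancestor buffer may still hold a message
// (broadcast, stale, or fresh) in this basement node's key range that has not
// been applied to it. For ancestors examined and found to contain no such
// messages, raise *max_msn_applied to their max_msn_applied_to_node_on_disk.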
static bool bn_needs_ancestors_messages(
FT ft,
FTNODE node,
int childnum,
const pivot_bounds &bounds,
ANCESTORS ancestors,
MSN* max_msn_applied
)
{
BASEMENTNODE bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
bool needs_ancestors_messages = false;
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > bn->max_msn_applied.msn) {
paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL);
NONLEAF_CHILDINFO bnc = BNC(curr_ancestors->node, curr_ancestors->childnum);
if (bnc->broadcast_list.size() > 0) {
needs_ancestors_messages = true;
goto cleanup;
}
if (!bn->stale_ancestor_messages_applied) {
uint32_t stale_lbi, stale_ube;
find_bounds_within_message_tree(ft->cmp,
bnc->stale_message_tree,
&bnc->msg_buffer,
curr_bounds,
&stale_lbi,
&stale_ube);
if (stale_lbi < stale_ube) {
needs_ancestors_messages = true;
goto cleanup;
}
}
uint32_t fresh_lbi, fresh_ube;
find_bounds_within_message_tree(ft->cmp,
bnc->fresh_message_tree,
&bnc->msg_buffer,
curr_bounds,
&fresh_lbi,
&fresh_ube);
if (fresh_lbi < fresh_ube) {
needs_ancestors_messages = true;
goto cleanup;
}
if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > max_msn_applied->msn) {
max_msn_applied->msn = curr_ancestors->node->max_msn_applied_to_node_on_disk.msn;
}
}
}
cleanup:
return needs_ancestors_messages;
}
bool toku_ft_leaf_needs_ancestors_messages(
FT ft,
FTNODE node,
ANCESTORS ancestors,
const pivot_bounds &bounds,
MSN *const max_msn_in_path,
int child_to_read
)
// Effect: Determine whether there are messages in a node's ancestors
// which must be applied to it. These messages are in the correct
// keyrange for any available basement nodes, and are in nodes with the
// correct max_msn_applied_to_node_on_disk.
// Notes:
// This is an approximate query.
// Output:
// max_msn_in_path: max of "max_msn_applied_to_node_on_disk" over
// ancestors. This is used later to update basement nodes'
// max_msn_applied values in case we don't do the full algorithm.
// Returns:
// true if there may be some such messages
// false only if there are definitely no such messages
// Rationale:
// When we pin a node with a read lock, we want to quickly determine if
// we should exchange it for a write lock in preparation for applying
// messages. If there are no messages, we don't need the write lock.
{
paranoid_invariant(node->height == 0);
bool needs_ancestors_messages = false;
// child_to_read may be -1 in test cases
if (!node->dirty && child_to_read >= 0) {
paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
needs_ancestors_messages = bn_needs_ancestors_messages(
ft,
node,
child_to_read,
bounds,
ancestors,
max_msn_in_path
);
}
else {
for (int i = 0; i < node->n_children; ++i) {
if (BP_STATE(node, i) != PT_AVAIL) { continue; }
needs_ancestors_messages = bn_needs_ancestors_messages(
ft,
node,
i,
bounds,
ancestors,
max_msn_in_path
);
if (needs_ancestors_messages) {
goto cleanup;
}
}
}
cleanup:
return needs_ancestors_messages;
}
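// Effect: Raise max_msn_applied on the targeted basement node(s) to
// max_msn_applied if it is larger, using a compare-and-swap since concurrent
// readers may race to publish the same value (see the comment in the loop).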
void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read) {
invariant(node->height == 0);
if (!node->dirty && child_to_read >= 0) {
paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
BASEMENTNODE bn = BLB(node, child_to_read);
if (max_msn_applied.msn > bn->max_msn_applied.msn) {
// see comment below
(void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn);
}
}
else {
for (int i = 0; i < node->n_children; ++i) {
if (BP_STATE(node, i) != PT_AVAIL) { continue; }
BASEMENTNODE bn = BLB(node, i);
if (max_msn_applied.msn > bn->max_msn_applied.msn) {
// This function runs in a shared access context, so to silence tools
// like DRD, we use a CAS and ignore the result.
// Any threads trying to update these basement nodes should be
// updating them to the same thing (since they all have a read lock on
// the same root-to-leaf path) so this is safe.
(void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn);
}
}
}
}
struct copy_to_stale_extra {
FT ft;
NONLEAF_CHILDINFO bnc;
};
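// omt callback: reinsert one marked (already-applied) message offset into the
// stale message tree, keyed by (key, msn).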
int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra) __attribute__((nonnull(3)));
int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra)
{
MSN msn;
DBT key;
extra->bnc->msg_buffer.get_message_key_msn(offset, &key, &msn);
struct toku_msg_buffer_key_msn_heaviside_extra heaviside_extra(extra->ft->cmp, &extra->bnc->msg_buffer, &key, msn);
int r = extra->bnc->stale_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, heaviside_extra, nullptr);
invariant_zero(r);
return 0;
}
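// Effect: Move every message that was marked while being applied (via
// iterate_and_mark_range) from the fresh message tree to the stale message
// tree, then drop the marked entries from the fresh tree.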
void toku_ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc) {
struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc };
int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra);
invariant_zero(r);
bnc->fresh_message_tree.delete_all_marked();
}
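// Effect: For each in-memory child buffer of this internal node, move its
// marked fresh messages to the stale tree.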
void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) {
invariant(node->height > 0);
for (int i = 0; i < node->n_children; ++i) {
if (BP_STATE(node, i) != PT_AVAIL) {
continue;
}
NONLEAF_CHILDINFO bnc = BNC(node, i);
// We can't delete things out of the fresh tree inside the above
// procedures because we're still looking at the fresh tree. Instead
// we have to move messages after we're done looking at it.
toku_ft_bnc_move_messages_to_stale(ft, bnc);
}
}
//
// Balance // Availability // Size
struct rebalance_array_info {
uint32_t offset;
LEAFENTRY *le_array;
uint32_t *key_sizes_array;
const void **key_ptr_array;
static int fn(const void* key, const uint32_t keylen, const LEAFENTRY &le,
const uint32_t idx, struct rebalance_array_info *const ai) {
ai->le_array[idx+ai->offset] = le;
ai->key_sizes_array[idx+ai->offset] = keylen;
ai->key_ptr_array[idx+ai->offset] = key;
return 0;
}
};
// There must still be at least one child
// Requires that all messages in buffers above have been applied.
// Because all messages above have been applied, setting msn of all new basements
// to max msn of existing basements is correct. (There cannot be any messages in
// buffers above that still need to be applied.)
void toku_ftnode_leaf_rebalance(FTNODE node, unsigned int basementnodesize) {
assert(node->height == 0);
assert(node->dirty);
uint32_t num_orig_basements = node->n_children;
// Count number of leaf entries in this leaf (num_le).
uint32_t num_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
num_le += BLB_DATA(node, i)->num_klpairs();
}
uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array
// Create an array of OMTVALUE's that store all the pointers to all the data.
// Each element in leafpointers is a pointer to a leaf.
toku::scoped_malloc leafpointers_buf(sizeof(LEAFENTRY) * num_alloc);
LEAFENTRY *leafpointers = reinterpret_cast<LEAFENTRY *>(leafpointers_buf.get());
leafpointers[0] = NULL;
toku::scoped_malloc key_pointers_buf(sizeof(void *) * num_alloc);
const void **key_pointers = reinterpret_cast<const void **>(key_pointers_buf.get());
key_pointers[0] = NULL;
toku::scoped_malloc key_sizes_buf(sizeof(uint32_t) * num_alloc);
uint32_t *key_sizes = reinterpret_cast<uint32_t *>(key_sizes_buf.get());
// Capture pointers to old mempools' buffers (so they can be destroyed)
toku::scoped_malloc old_bns_buf(sizeof(BASEMENTNODE) * num_orig_basements);
BASEMENTNODE *old_bns = reinterpret_cast<BASEMENTNODE *>(old_bns_buf.get());
old_bns[0] = NULL;
uint32_t curr_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
bn_data* bd = BLB_DATA(node, i);
struct rebalance_array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers };
bd->iterate<rebalance_array_info, rebalance_array_info::fn>(&ai);
curr_le += bd->num_klpairs();
}
// Create an array that will store indexes of new pivots.
// Each element in new_pivots is the index of a pivot key.
// (Allocating num_le of them is overkill, but num_le is an upper bound.)
toku::scoped_malloc new_pivots_buf(sizeof(uint32_t) * num_alloc);
uint32_t *new_pivots = reinterpret_cast<uint32_t *>(new_pivots_buf.get());
new_pivots[0] = 0;
// Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
toku::scoped_malloc le_sizes_buf(sizeof(size_t) * num_alloc);
size_t *le_sizes = reinterpret_cast<size_t *>(le_sizes_buf.get());
le_sizes[0] = 0;
// Create an array that will store the size of each basement.
// This is the sum of the leaf sizes of all the leaves in that basement.
// We don't know how many basements there will be, so we use num_le as the upper bound.
// Sum of all le sizes in a single basement
toku::scoped_calloc bn_le_sizes_buf(sizeof(size_t) * num_alloc);
size_t *bn_le_sizes = reinterpret_cast<size_t *>(bn_le_sizes_buf.get());
// Sum of all key sizes in a single basement
toku::scoped_calloc bn_key_sizes_buf(sizeof(size_t) * num_alloc);
size_t *bn_key_sizes = reinterpret_cast<size_t *>(bn_key_sizes_buf.get());
// TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les).
// Each entry is the number of leafentries in this basement. (Again, num_le is an overkill upper bound.)
toku::scoped_malloc num_les_this_bn_buf(sizeof(uint32_t) * num_alloc);
uint32_t *num_les_this_bn = reinterpret_cast<uint32_t *>(num_les_this_bn_buf.get());
num_les_this_bn[0] = 0;
// Figure out the new pivots.
// We need the index of each pivot, and for each basement we need
// the number of leaves and the sum of the sizes of the leaves (memory requirement for basement).
uint32_t curr_pivot = 0;
uint32_t num_le_in_curr_bn = 0;
uint32_t bn_size_so_far = 0;
for (uint32_t i = 0; i < num_le; i++) {
uint32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]);
le_sizes[i] = curr_le_size;
if ((bn_size_so_far + curr_le_size + sizeof(uint32_t) + key_sizes[i] > basementnodesize) && (num_le_in_curr_bn != 0)) {
// cap off the current basement node to end with the element before i
new_pivots[curr_pivot] = i-1;
curr_pivot++;
num_le_in_curr_bn = 0;
bn_size_so_far = 0;
}
num_le_in_curr_bn++;
num_les_this_bn[curr_pivot] = num_le_in_curr_bn;
bn_le_sizes[curr_pivot] += curr_le_size;
bn_key_sizes[curr_pivot] += sizeof(uint32_t) + key_sizes[i]; // uint32_t le_offset
bn_size_so_far += curr_le_size + sizeof(uint32_t) + key_sizes[i];
}
// curr_pivot is now the total number of pivot keys in the leaf node
int num_pivots = curr_pivot;
int num_children = num_pivots + 1;
// now we need to fill in the new basement nodes and pivots
// TODO: (Zardosht) this is an ugly thing right now
// Need to figure out how to properly deal with seqinsert.
// I am not happy with how this is being
// handled with basement nodes
uint32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1);
// choose the max msn applied to any basement as the max msn applied to all new basements
MSN max_msn = ZERO_MSN;
for (uint32_t i = 0; i < num_orig_basements; i++) {
MSN curr_msn = BLB_MAX_MSN_APPLIED(node,i);
max_msn = (curr_msn.msn > max_msn.msn) ? curr_msn : max_msn;
}
// remove the basement node in the node, we've saved a copy
for (uint32_t i = 0; i < num_orig_basements; i++) {
// save a reference to the old basement nodes
// we will need them to ensure that the memory
// stays intact
old_bns[i] = toku_detach_bn(node, i);
}
// Now destroy the old basements, but do not destroy leaves
toku_destroy_ftnode_internals(node);
// now reallocate pieces and start filling them in
invariant(num_children > 0);
node->n_children = num_children;
XCALLOC_N(num_children, node->bp); // allocate pointers to basements (bp)
for (int i = 0; i < num_children; i++) {
set_BLB(node, i, toku_create_empty_bn()); // allocate empty basements and set bp pointers
}
// now we start to fill in the data
// first the pivots
toku::scoped_malloc pivotkeys_buf(num_pivots * sizeof(DBT));
DBT *pivotkeys = reinterpret_cast<DBT *>(pivotkeys_buf.get());
for (int i = 0; i < num_pivots; i++) {
uint32_t size = key_sizes[new_pivots[i]];
const void *key = key_pointers[new_pivots[i]];
toku_fill_dbt(&pivotkeys[i], key, size);
}
node->pivotkeys.create_from_dbts(pivotkeys, num_pivots);
uint32_t baseindex_this_bn = 0;
// now the basement nodes
for (int i = 0; i < num_children; i++) {
// put back seqinsert
BLB_SEQINSERT(node, i) = tmp_seqinsert;
// create start (inclusive) and end (exclusive) boundaries for data of basement node
uint32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement
uint32_t curr_end = (i==num_pivots) ? num_le : new_pivots[i]+1; // index of first leaf in next basement
uint32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement
// create indexes for new basement
invariant(baseindex_this_bn == curr_start);
uint32_t num_les_to_copy = num_les_this_bn[i];
invariant(num_les_to_copy == num_in_bn);
bn_data* bd = BLB_DATA(node, i);
bd->set_contents_as_clone_of_sorted_array(
num_les_to_copy,
&key_pointers[baseindex_this_bn],
&key_sizes[baseindex_this_bn],
&leafpointers[baseindex_this_bn],
&le_sizes[baseindex_this_bn],
bn_key_sizes[i], // Total key sizes
bn_le_sizes[i] // total le sizes
);
BP_STATE(node,i) = PT_AVAIL;
BP_TOUCH_CLOCK(node,i);
BLB_MAX_MSN_APPLIED(node,i) = max_msn;
baseindex_this_bn += num_les_to_copy; // set to index of next bn
}
node->max_msn_applied_to_node_on_disk = max_msn;
// destroy buffers of old mempools
for (uint32_t i = 0; i < num_orig_basements; i++) {
destroy_basement_node(old_bns[i]);
}
}
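// Effect: Return true iff every partition of the node is in memory (PT_AVAIL).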
bool toku_ftnode_fully_in_memory(FTNODE node) {
for (int i = 0; i < node->n_children; i++) {
if (BP_STATE(node,i) != PT_AVAIL) {
return false;
}
}
return true;
}
void toku_ftnode_assert_fully_in_memory(FTNODE UU(node)) {
paranoid_invariant(toku_ftnode_fully_in_memory(node));
}
uint32_t toku_ftnode_leaf_num_entries(FTNODE node) {