#include <seastar/rpc/rpc.hh>
#include <seastar/core/align.hh>
#include <seastar/core/seastar.hh>
#include <seastar/core/print.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/metrics.hh>
#include <boost/range/adaptor/map.hpp>
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<seastar::rpc::streaming_domain_type> : fmt::ostream_formatter {};
#endif
namespace seastar {
namespace rpc {
void logger::operator()(const client_info& info, id_type msg_id, const sstring& str) const {
log(format("client {} msg_id {}: {}", info.addr, msg_id, str));
}
void logger::operator()(const client_info& info, id_type msg_id, log_level level, std::string_view str) const {
log(level, "client {} msg_id {}: {}", info.addr, msg_id, str);
}
void logger::operator()(const client_info& info, const sstring& str) const {
(*this)(info.addr, str);
}
void logger::operator()(const client_info& info, log_level level, std::string_view str) const {
(*this)(info.addr, level, str);
}
void logger::operator()(const socket_address& addr, const sstring& str) const {
log(format("client {}: {}", addr, str));
}
void logger::operator()(const socket_address& addr, log_level level, std::string_view str) const {
log(level, "client {}: {}", addr, str);
}
no_wait_type no_wait;
snd_buf::snd_buf(size_t size_) : size(size_) {
if (size <= chunk_size) {
bufs = temporary_buffer<char>(size);
} else {
std::vector<temporary_buffer<char>> v;
v.reserve(align_up(size_t(size), chunk_size) / chunk_size);
while (size_) {
v.push_back(temporary_buffer<char>(std::min(chunk_size, size_)));
size_ -= v.back().size();
}
bufs = std::move(v);
}
}
snd_buf::snd_buf(snd_buf&&) noexcept = default;
snd_buf& snd_buf::operator=(snd_buf&&) noexcept = default;
temporary_buffer<char>& snd_buf::front() {
auto* one = std::get_if<temporary_buffer<char>>(&bufs);
if (one) {
return *one;
} else {
return std::get<std::vector<temporary_buffer<char>>>(bufs).front();
}
}
// Make a shard-local copy of a remote buffer. No data is actually copied, only pointers;
// the deleter of the new buffer takes care of deleting the original buffer.
template<typename T> // T is either snd_buf or rcv_buf
T make_shard_local_buffer_copy(foreign_ptr<std::unique_ptr<T>> org) {
if (org.get_owner_shard() == this_shard_id()) {
return std::move(*org);
}
T buf(org->size);
auto* one = std::get_if<temporary_buffer<char>>(&org->bufs);
if (one) {
buf.bufs = temporary_buffer<char>(one->get_write(), one->size(), make_object_deleter(std::move(org)));
} else {
auto& orgbufs = std::get<std::vector<temporary_buffer<char>>>(org->bufs);
std::vector<temporary_buffer<char>> newbufs;
newbufs.reserve(orgbufs.size());
deleter d = make_object_deleter(std::move(org));
for (auto&& b : orgbufs) {
newbufs.push_back(temporary_buffer<char>(b.get_write(), b.size(), d.share()));
}
buf.bufs = std::move(newbufs);
}
return buf;
}
template snd_buf make_shard_local_buffer_copy(foreign_ptr<std::unique_ptr<snd_buf>>);
template rcv_buf make_shard_local_buffer_copy(foreign_ptr<std::unique_ptr<rcv_buf>>);
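// Usage sketch (foreign_buf is a hypothetical variable of type
// foreign_ptr<std::unique_ptr<rcv_buf>>): a shard that received a buffer from another
// shard makes a local view before touching it; destroying the copy later releases the
// original buffer on its owner shard via the deleter installed above.
//
//     rcv_buf local = make_shard_local_buffer_copy(std::move(foreign_buf));
//     // ... parse local.bufs on this shard ...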
static void log_exception(connection& c, log_level level, const char* log, std::exception_ptr eptr) {
const char* s;
try {
std::rethrow_exception(eptr);
} catch (std::exception& ex) {
s = ex.what();
} catch (...) {
s = "unknown exception";
}
auto formatted = format("{}: {}", log, s);
c.get_logger()(c.peer_address(), level, std::string_view(formatted.data(), formatted.size()));
}
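// When a compressor was negotiated, compress() asks it to reserve 4 bytes of headroom in
// front of the compressed payload and then writes the compressed-payload length into that
// headroom as a little-endian uint32, so a compressed frame on the wire is
//   le32 compressed length
//   ...  compressed bytes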
snd_buf connection::compress(snd_buf buf) {
if (_compressor) {
buf = _compressor->compress(4, std::move(buf));
static_assert(snd_buf::chunk_size >= 4, "send buffer chunk size is too small");
write_le<uint32_t>(buf.front().get_write(), buf.size - 4);
return buf;
}
return buf;
}
future<> connection::send_buffer(snd_buf buf) {
auto* b = std::get_if<temporary_buffer<char>>(&buf.bufs);
if (b) {
return _write_buf.write(std::move(*b));
} else {
return do_with(std::move(std::get<std::vector<temporary_buffer<char>>>(buf.bufs)),
[this] (std::vector<temporary_buffer<char>>& ar) {
return do_for_each(ar.begin(), ar.end(), [this] (auto& b) {
return _write_buf.write(std::move(b));
});
});
}
}
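// send_entry() finalizes the 8-byte timeout prefix reserved in request frames (see
// request_frame_with_timeout below): if the TIMEOUT feature was negotiated, the number of
// milliseconds left until the entry expires is written there (0 when there is no timeout);
// otherwise the unused headroom is trimmed off. The frame is then compressed and sent.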
future<> connection::send_entry(outgoing_entry& d) {
if (_propagate_timeout) {
static_assert(snd_buf::chunk_size >= sizeof(uint64_t), "send buffer chunk size is too small");
if (_timeout_negotiated) {
auto expire = d.t.get_timeout();
uint64_t left = 0;
if (expire != typename timer<rpc_clock_type>::time_point()) {
left = std::chrono::duration_cast<std::chrono::milliseconds>(expire - timer<rpc_clock_type>::clock::now()).count();
}
write_le<uint64_t>(d.buf.front().get_write(), left);
} else {
d.buf.front().trim_front(sizeof(uint64_t));
d.buf.size -= sizeof(uint64_t);
}
}
auto buf = compress(std::move(d.buf));
return send_buffer(std::move(buf)).then([this] {
_stats.sent_messages++;
return _write_buf.flush();
});
}
void connection::set_negotiated() noexcept {
_negotiated->set_value();
_negotiated = std::nullopt;
}
future<> connection::stop_send_loop(std::exception_ptr ex) {
_error = true;
if (_connected) {
_fd.shutdown_output();
}
if (ex == nullptr) {
ex = std::make_exception_ptr(closed_error());
}
while (!_outgoing_queue.empty()) {
auto it = std::prev(_outgoing_queue.end());
// Cancel all but the front entry normally. The front entry is sitting in
// send_entry() and cannot be withdrawn, except when _negotiated is still
// engaged. In the latter case, when it is aborted below, the entry's
// continuation will not be called and its done promise will not resolve
// _outgoing_queue_ready, so set it here.
if (it != _outgoing_queue.begin()) {
withdraw(it, ex);
} else {
if (_negotiated) {
it->done.set_exception(ex);
}
break;
}
}
if (_negotiated) {
_negotiated->set_exception(ex);
}
return when_all(std::move(_outgoing_queue_ready), std::move(_sink_closed_future)).then([this] (std::tuple<future<>, future<bool>> res){
// _outgoing_queue_ready might be exceptional if the queue drain or
// the _negotiated abort made it so
std::get<0>(res).ignore_ready_future();
// _sink_closed_future is never exceptional
bool sink_closed = std::get<1>(res).get0();
return _connected && !sink_closed ? _write_buf.close() : make_ready_future();
});
}
void connection::set_socket(connected_socket&& fd) {
if (_connected) {
throw std::runtime_error("already connected");
}
_fd = std::move(fd);
_read_buf = _fd.input();
_write_buf = _fd.output();
_connected = true;
}
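// The negotiation frame is
//   8 bytes  protocol magic
//   le32     total length of the feature records that follow
// followed by zero or more feature records, each
//   le32     feature id
//   le32     feature data length
//   ...      feature data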
future<> connection::send_negotiation_frame(feature_map features) {
auto negotiation_frame_feature_record_size = [] (const feature_map::value_type& e) {
return 8 + e.second.size();
};
auto extra_len = boost::accumulate(
features | boost::adaptors::transformed(negotiation_frame_feature_record_size),
uint32_t(0));
temporary_buffer<char> reply(sizeof(negotiation_frame) + extra_len);
auto p = reply.get_write();
p = std::copy_n(rpc_magic, 8, p);
write_le<uint32_t>(p, extra_len);
p += 4;
for (auto&& e : features) {
write_le<uint32_t>(p, static_cast<uint32_t>(e.first));
p += 4;
write_le<uint32_t>(p, e.second.size());
p += 4;
p = std::copy_n(e.second.begin(), e.second.size(), p);
}
return _write_buf.write(std::move(reply)).then([this] {
_stats.sent_messages++;
return _write_buf.flush();
});
}
void connection::withdraw(outgoing_entry::container_t::iterator it, std::exception_ptr ex) {
assert(it != _outgoing_queue.end());
auto pit = std::prev(it);
// Previous entry's (pit's) done future will schedule current entry (it)
// continuation. Similarly, it.done will schedule next entry continuation
// or will resolve _outgoing_queue_ready future.
//
// To withdraw "it" we need to do two things:
// - make pit.done resolve it->next (some time later)
// - resolve "it"'s continuation right now
//
// The latter is achieved by resolving pit.done immediately, the former
// by moving it.done into pit.done. For simplicity (verging on obscurity?)
// both done's are just swapped and "it" resolves its new promise.
std::swap(it->done, pit->done);
it->uncancellable();
it->unlink();
if (ex == nullptr) {
it->done.set_value();
} else {
it->done.set_exception(ex);
}
}
future<> connection::send(snd_buf buf, std::optional<rpc_clock_type::time_point> timeout, cancellable* cancel) {
if (!_error) {
if (timeout && *timeout <= rpc_clock_type::now()) {
return make_ready_future<>();
}
auto p = std::make_unique<outgoing_entry>(std::move(buf));
auto& d = *p;
_outgoing_queue.push_back(d);
_outgoing_queue_size++;
auto deleter = [this, it = _outgoing_queue.iterator_to(d)] {
// The front entry is most likely sitting inside send_entry() continuations and thus
// cannot be cancelled (unless _negotiated is unresolved; see enqueue_zero_frame()).
if (it != _outgoing_queue.begin()) {
withdraw(it);
}
};
if (timeout) {
auto& t = d.t;
t.set_callback(deleter);
t.arm(timeout.value());
}
if (cancel) {
cancel->cancel_send = std::move(deleter);
cancel->send_back_pointer = &d.pcancel;
d.pcancel = cancel;
}
// New entry should continue (do its .then() lambda) after _outgoing_queue_ready
// resolves. Next entry will need to do the same after this entry's done resolves.
// Thus -- replace _outgoing_queue_ready with d's future and chain its continuation
// on ..._ready's old value.
return std::exchange(_outgoing_queue_ready, d.done.get_future()).then([this, p = std::move(p)] () mutable {
_outgoing_queue_size--;
if (__builtin_expect(!p->is_linked(), false)) {
// If withdrawn, the entry is unlinked and this lambda fires right away
return make_ready_future<>();
}
p->uncancellable();
return send_entry(*p).then_wrapped([this, p = std::move(p)] (auto f) mutable {
if (f.failed()) {
f.ignore_ready_future();
abort();
}
p->done.set_value();
});
});
} else {
return make_exception_future<>(closed_error());
}
}
void connection::abort() {
if (!_error) {
_error = true;
_fd.shutdown_input();
}
}
future<> connection::stop() noexcept {
try {
abort();
} catch (...) {
log_exception(*this, log_level::error, "fail to shutdown connection while stopping", std::current_exception());
}
return _stopped.get_future();
}
template<typename Connection>
static bool verify_frame(Connection& c, temporary_buffer<char>& buf, size_t expected, const char* log) {
if (buf.size() != expected) {
if (buf.size() != 0) {
c.get_logger()(c.peer_address(), log);
}
return false;
}
return true;
}
template<typename Connection>
static
future<feature_map>
receive_negotiation_frame(Connection& c, input_stream<char>& in) {
return in.read_exactly(sizeof(negotiation_frame)).then([&c, &in] (temporary_buffer<char> neg) {
if (!verify_frame(c, neg, sizeof(negotiation_frame), "unexpected eof during negotiation frame")) {
return make_exception_future<feature_map>(closed_error());
}
negotiation_frame frame;
std::copy_n(neg.get_write(), sizeof(frame.magic), frame.magic);
frame.len = read_le<uint32_t>(neg.get_write() + 8);
if (std::memcmp(frame.magic, rpc_magic, sizeof(frame.magic)) != 0) {
c.get_logger()(c.peer_address(), format("wrong protocol magic: {:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
frame.magic[0], frame.magic[1], frame.magic[2], frame.magic[3], frame.magic[4], frame.magic[5], frame.magic[6], frame.magic[7]));
return make_exception_future<feature_map>(closed_error());
}
auto len = frame.len;
return in.read_exactly(len).then([&c, len] (temporary_buffer<char> extra) {
if (extra.size() != len) {
c.get_logger()(c.peer_address(), "unexpected eof during negotiation frame");
return make_exception_future<feature_map>(closed_error());
}
feature_map map;
auto p = extra.get();
auto end = p + extra.size();
while (p != end) {
if (end - p < 8) {
c.get_logger()(c.peer_address(), "bad feature data format in negotiation frame");
return make_exception_future<feature_map>(closed_error());
}
auto feature = static_cast<protocol_features>(read_le<uint32_t>(p));
auto f_len = read_le<uint32_t>(p + 4);
p += 8;
if (f_len > end - p) {
c.get_logger()(c.peer_address(), "buffer underflow in feature data in negotiation frame");
return make_exception_future<feature_map>(closed_error());
}
auto data = sstring(p, f_len);
p += f_len;
map.emplace(feature, std::move(data));
}
return make_ready_future<feature_map>(std::move(map));
});
});
}
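// Read up to `size` bytes from `in` into an rcv_buf. On eof before any data arrives an
// empty (zero-sized) buffer is returned; on eof part-way through, the buffer's size is
// reduced to what was actually read, which callers detect as a truncated frame.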
inline future<rcv_buf>
read_rcv_buf(input_stream<char>& in, uint32_t size) {
return in.read_up_to(size).then([&, size] (temporary_buffer<char> data) mutable {
rcv_buf rb(size);
if (data.size() == 0) {
return make_ready_future<rcv_buf>(rcv_buf());
} else if (data.size() == size) {
rb.bufs = std::move(data);
return make_ready_future<rcv_buf>(std::move(rb));
} else {
size -= data.size();
std::vector<temporary_buffer<char>> v;
v.push_back(std::move(data));
rb.bufs = std::move(v);
return do_with(std::move(rb), std::move(size), [&in] (rcv_buf& rb, uint32_t& left) {
return repeat([&] () {
return in.read_up_to(left).then([&] (temporary_buffer<char> data) {
if (!data.size()) {
rb.size -= left;
return stop_iteration::yes;
} else {
left -= data.size();
std::get<std::vector<temporary_buffer<char>>>(rb.bufs).push_back(std::move(data));
return left ? stop_iteration::no : stop_iteration::yes;
}
});
}).then([&rb] {
return std::move(rb);
});
});
}
});
}
template<typename FrameType>
typename FrameType::return_type
connection::read_frame(socket_address info, input_stream<char>& in) {
auto header_size = FrameType::header_size();
return in.read_exactly(header_size).then([this, header_size, info, &in] (temporary_buffer<char> header) {
if (header.size() != header_size) {
if (header.size() != 0) {
_logger(info, format("unexpected eof on a {} while reading header: expected {:d} got {:d}", FrameType::role(), header_size, header.size()));
}
return FrameType::empty_value();
}
auto h = FrameType::decode_header(header.get());
auto size = FrameType::get_size(h);
if (!size) {
return FrameType::make_value(h, rcv_buf());
} else {
return read_rcv_buf(in, size).then([this, info, h = std::move(h), size] (rcv_buf rb) {
if (rb.size != size) {
_logger(info, format("unexpected eof on a {} while reading data: expected {:d} got {:d}", FrameType::role(), size, rb.size));
return FrameType::empty_value();
} else {
return FrameType::make_value(h, std::move(rb));
}
});
}
});
}
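// Like read_frame(), but when a compressor was negotiated each frame arrives wrapped as
//   le32 compressed length
//   ...  compressed bytes
// The payload is decompressed, turned into an in-memory input stream, and the inner
// frame is then parsed from that stream with read_frame().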
template<typename FrameType>
typename FrameType::return_type
connection::read_frame_compressed(socket_address info, std::unique_ptr<compressor>& compressor, input_stream<char>& in) {
if (compressor) {
return in.read_exactly(4).then([this, info, &in, &compressor] (temporary_buffer<char> compress_header) {
if (compress_header.size() != 4) {
if (compress_header.size() != 0) {
_logger(info, format("unexpected eof on a {} while reading compression header: expected 4 got {:d}", FrameType::role(), compress_header.size()));
}
return FrameType::empty_value();
}
auto ptr = compress_header.get();
auto size = read_le<uint32_t>(ptr);
return read_rcv_buf(in, size).then([this, size, &compressor, info] (rcv_buf compressed_data) {
if (compressed_data.size != size) {
_logger(info, format("unexpected eof on a {} while reading compressed data: expected {:d} got {:d}", FrameType::role(), size, compressed_data.size));
return FrameType::empty_value();
}
auto eb = compressor->decompress(std::move(compressed_data));
net::packet p;
auto* one = std::get_if<temporary_buffer<char>>(&eb.bufs);
if (one) {
p = net::packet(std::move(p), std::move(*one));
} else {
auto&& bufs = std::get<std::vector<temporary_buffer<char>>>(eb.bufs);
p.reserve(bufs.size());
for (auto&& b : bufs) {
p = net::packet(std::move(p), std::move(b));
}
}
return do_with(as_input_stream(std::move(p)), [this, info] (input_stream<char>& in) {
return read_frame<FrameType>(info, in);
});
});
});
} else {
return read_frame<FrameType>(info, in);
}
}
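// A stream frame is
//   le32 payload length
//   ...  payload
// The special length -1U (0xffffffff) marks the end of the stream and carries no payload.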
struct stream_frame {
using opt_buf_type = std::optional<rcv_buf>;
using return_type = future<opt_buf_type>;
struct header_type {
uint32_t size;
bool eos;
};
static size_t header_size() {
return 4;
}
static const char* role() {
return "stream";
}
static future<opt_buf_type> empty_value() {
return make_ready_future<opt_buf_type>(std::nullopt);
}
static header_type decode_header(const char* ptr) {
header_type h{read_le<uint32_t>(ptr), false};
if (h.size == -1U) {
h.size = 0;
h.eos = true;
}
return h;
}
static uint32_t get_size(const header_type& t) {
return t.size;
}
static future<opt_buf_type> make_value(const header_type& t, rcv_buf data) {
if (t.eos) {
data.size = -1U;
}
return make_ready_future<opt_buf_type>(std::move(data));
}
};
future<std::optional<rcv_buf>>
connection::read_stream_frame_compressed(input_stream<char>& in) {
return read_frame_compressed<stream_frame>(peer_address(), _compressor, in);
}
future<> connection::stream_close() {
auto f = make_ready_future<>();
if (!error()) {
promise<bool> p;
_sink_closed_future = p.get_future();
// stop_send_loop(), which also calls _write_buf.close(), and this code can run in parallel.
// Use _sink_closed_future to serialize them and skip the second call to close().
f = _write_buf.close().finally([p = std::move(p)] () mutable { p.set_value(true);});
}
return f.finally([this] () mutable { return stop(); });
}
future<> connection::stream_process_incoming(rcv_buf&& buf) {
// We do not want to deadlock on huge packets, so let them in,
// but only one at a time.
auto size = std::min(size_t(buf.size), max_stream_buffers_memory);
return get_units(_stream_sem, size).then([this, buf = std::move(buf)] (semaphore_units<>&& su) mutable {
buf.su = std::move(su);
return _stream_queue.push_eventually(std::move(buf));
});
}
future<> connection::handle_stream_frame() {
return read_stream_frame_compressed(_read_buf).then([this] (std::optional<rcv_buf> data) {
if (!data) {
_error = true;
return make_ready_future<>();
}
return stream_process_incoming(std::move(*data));
});
}
future<> connection::stream_receive(circular_buffer<foreign_ptr<std::unique_ptr<rcv_buf>>>& bufs) {
return _stream_queue.not_empty().then([this, &bufs] {
bool eof = !_stream_queue.consume([&bufs] (rcv_buf&& b) {
if (b.size == -1U) { // max fragment length marks the end of a stream
return false;
} else {
bufs.push_back(make_foreign(std::make_unique<rcv_buf>(std::move(b))));
return true;
}
});
if (eof && !bufs.empty()) {
assert(_stream_queue.empty());
_stream_queue.push(rcv_buf(-1U)); // push eof marker back for next read to notice it
}
});
}
void connection::register_stream(connection_id id, xshard_connection_ptr c) {
_streams.emplace(id, std::move(c));
}
xshard_connection_ptr connection::get_stream(connection_id id) const {
auto it = _streams.find(id);
if (it == _streams.end()) {
throw std::logic_error(format("rpc stream id {} not found", id).c_str());
}
return it->second;
}
// The request frame is
// le64 optional timeout (see request_frame_with_timeout below)
// le64 message type a.k.a. verb ID
// le64 message ID
// le32 payload length
// ... payload
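// With the timeout prefix (request_frame_with_timeout) the raw header is 28 bytes:
//   [0..7]   le64 remaining timeout in milliseconds (0 if none)
//   [8..15]  le64 verb ID
//   [16..23] le64 message ID
//   [24..27] le32 payload length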
struct request_frame {
using opt_buf_type = std::optional<rcv_buf>;
using header_and_buffer_type = std::tuple<std::optional<uint64_t>, uint64_t, int64_t, opt_buf_type>;
using return_type = future<header_and_buffer_type>;
using header_type = std::tuple<std::optional<uint64_t>, uint64_t, int64_t, uint32_t>;
static constexpr size_t raw_header_size = sizeof(uint64_t) + sizeof(int64_t) + sizeof(uint32_t);
static size_t header_size() {
static_assert(request_frame_headroom >= raw_header_size);
return raw_header_size;
}
static const char* role() {
return "server";
}
static auto empty_value() {
return make_ready_future<header_and_buffer_type>(header_and_buffer_type(std::nullopt, uint64_t(0), 0, std::nullopt));
}
static header_type decode_header(const char* ptr) {
auto type = read_le<uint64_t>(ptr);
auto msgid = read_le<int64_t>(ptr + 8);
auto size = read_le<uint32_t>(ptr + 16);
return std::make_tuple(std::nullopt, type, msgid, size);
}
static void encode_header(uint64_t type, int64_t msg_id, snd_buf& buf, size_t off) {
auto p = buf.front().get_write() + off;
write_le<uint64_t>(p, type);
write_le<int64_t>(p + 8, msg_id);
write_le<uint32_t>(p + 16, buf.size - raw_header_size - off);
}
static uint32_t get_size(const header_type& t) {
return std::get<3>(t);
}
static auto make_value(const header_type& t, rcv_buf data) {
return make_ready_future<header_and_buffer_type>(header_and_buffer_type(std::get<0>(t), std::get<1>(t), std::get<2>(t), std::move(data)));
}
};
// This frame is used if protocol_features.TIMEOUT was negotiated
struct request_frame_with_timeout : request_frame {
using super = request_frame;
static constexpr size_t raw_header_size = sizeof(uint64_t) + request_frame::raw_header_size;
static size_t header_size() {
static_assert(request_frame_headroom >= raw_header_size);
return raw_header_size;
}
static typename super::header_type decode_header(const char* ptr) {
auto h = super::decode_header(ptr + 8);
std::get<0>(h) = read_le<uint64_t>(ptr);
return h;
}
static void encode_header(uint64_t type, int64_t msg_id, snd_buf& buf) {
static_assert(snd_buf::chunk_size >= raw_header_size, "send buffer chunk size is too small");
// expiration timer is encoded later
request_frame::encode_header(type, msg_id, buf, 8);
}
};
future<> client::request(uint64_t type, int64_t msg_id, snd_buf buf, std::optional<rpc_clock_type::time_point> timeout, cancellable* cancel) {
request_frame_with_timeout::encode_header(type, msg_id, buf);
return send(std::move(buf), timeout, cancel);
}
void
client::negotiate(feature_map provided) {
// record features returned here
for (auto&& e : provided) {
auto id = e.first;
switch (id) {
// supported features go here
case protocol_features::COMPRESS:
if (_options.compressor_factory) {
_compressor = _options.compressor_factory->negotiate(e.second, false);
}
if (!_compressor) {
throw std::runtime_error(format("RPC server responded with compression {} - unsupported", e.second));
}
break;
case protocol_features::TIMEOUT:
_timeout_negotiated = true;
break;
case protocol_features::CONNECTION_ID: {
_id = deserialize_connection_id(e.second);
break;
}
default:
// nothing to do
;
}
}
}
future<> client::negotiate_protocol(feature_map features) {
return send_negotiation_frame(std::move(features)).then([this] {
return receive_negotiation_frame(*this, _read_buf).then([this] (feature_map features) {
return negotiate(std::move(features));
});
});
}
// The response frame is
// le64 message ID
// le32 payload size
// ... payload
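// i.e. a 12-byte header:
//   [0..7]  le64 message ID (negative when the response carries an exception)
//   [8..11] le32 payload length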
struct response_frame {
using opt_buf_type = std::optional<rcv_buf>;
using header_and_buffer_type = std::tuple<int64_t, opt_buf_type>;
using return_type = future<header_and_buffer_type>;
using header_type = std::tuple<int64_t, uint32_t>;
static constexpr size_t raw_header_size = sizeof(int64_t) + sizeof(uint32_t);
static size_t header_size() {
static_assert(response_frame_headroom >= raw_header_size);
return raw_header_size;
}
static const char* role() {
return "client";
}
static auto empty_value() {
return make_ready_future<header_and_buffer_type>(header_and_buffer_type(0, std::nullopt));
}
static header_type decode_header(const char* ptr) {
auto msgid = read_le<int64_t>(ptr);
auto size = read_le<uint32_t>(ptr + 8);
return std::make_tuple(msgid, size);
}
static void encode_header(int64_t msg_id, snd_buf& data) {
static_assert(snd_buf::chunk_size >= raw_header_size, "send buffer chunk size is too small");
auto p = data.front().get_write();
write_le<int64_t>(p, msg_id);
write_le<uint32_t>(p + 8, data.size - raw_header_size);
}
static uint32_t get_size(const header_type& t) {
return std::get<1>(t);
}
static auto make_value(const header_type& t, rcv_buf data) {
return make_ready_future<header_and_buffer_type>(header_and_buffer_type(std::get<0>(t), std::move(data)));
}
};
future<response_frame::header_and_buffer_type>
client::read_response_frame_compressed(input_stream<char>& in) {
return read_frame_compressed<response_frame>(_server_addr, _compressor, in);
}
stats client::get_stats() const {
stats res = _stats;
res.wait_reply = incoming_queue_length();
res.pending = outgoing_queue_length();
return res;
}
void client::wait_for_reply(id_type id, std::unique_ptr<reply_handler_base>&& h, std::optional<rpc_clock_type::time_point> timeout, cancellable* cancel) {
if (timeout) {
h->t.set_callback(std::bind(std::mem_fn(&client::wait_timed_out), this, id));
h->t.arm(timeout.value());
}
if (cancel) {
cancel->cancel_wait = [this, id] {
_outstanding[id]->cancel();
_outstanding.erase(id);
};
h->pcancel = cancel;
cancel->wait_back_pointer = &h->pcancel;
}
_outstanding.emplace(id, std::move(h));
}
void client::wait_timed_out(id_type id) {
_stats.timeout++;
_outstanding[id]->timeout();
_outstanding.erase(id);
}
future<> client::stop() noexcept {
_error = true;
try {
_socket.shutdown();
} catch(...) {
log_exception(*this, log_level::error, "fail to shutdown connection while stopping", std::current_exception());
}
return _stopped.get_future();
}
void client::abort_all_streams() {
while (!_streams.empty()) {
auto&& s = _streams.begin();
assert(s->second->get_owner_shard() == this_shard_id()); // abort can be called only locally
s->second->get()->abort();
_streams.erase(s);
}
}
void client::deregister_this_stream() {
if (_parent) {
_parent->_streams.erase(_id);
}
}
// This is a simplified copy of the connection::send() method. Its intention is to
// keep a dummy entry at the front of the queue while connect+negotiate is happening, so
// that all subsequent entries can abort on timeout or explicit cancellation.
void client::enqueue_zero_frame() {
if (_error) {
return;
}
auto p = std::make_unique<outgoing_entry>(snd_buf(0));
auto& d = *p;
_outgoing_queue.push_back(d);
// Run it in the background. Even if the client is stopped it will pick
// up all the entries hanging around.
(void)std::exchange(_outgoing_queue_ready, d.done.get_future()).then_wrapped([p = std::move(p)] (auto f) mutable {
if (f.failed()) {
f.ignore_ready_future();
} else {
p->done.set_value();
}
});
}
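// Per-domain client metrics: clients sharing the same client_options::metrics_domain are
// grouped under one "rpc_client" metrics group labelled with that domain. When a client
// is destroyed its counters are folded into the domain's `dead` stats, so the exported
// totals keep accumulating across client lifetimes.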
struct client::metrics::domain {
metrics::domain_list_t list;
stats dead;
seastar::metrics::metric_groups metric_groups;
static thread_local std::unordered_map<sstring, domain> all;
static domain& find_or_create(sstring name);
stats::counter_type count_all(stats::counter_type stats::* field) noexcept {
stats::counter_type res = dead.*field;
for (const auto& m : list) {
res += m._c._stats.*field;
}
return res;
}
size_t count_all_fn(size_t (client::*fn)(void) const) noexcept {
size_t res = 0;
for (const auto& m : list) {
res += (m._c.*fn)();
}
return res;
}
domain(sstring name)
{
namespace sm = seastar::metrics;
auto domain_l = sm::label("domain")(name);
metric_groups.add_group("rpc_client", {
sm::make_gauge("count", [this] { return list.size(); },
sm::description("Total number of clients"), { domain_l }),
sm::make_counter("sent_messages", std::bind(&domain::count_all, this, &stats::sent_messages),
sm::description("Total number of messages sent"), { domain_l }),
sm::make_counter("replied", std::bind(&domain::count_all, this, &stats::replied),
sm::description("Total number of responses received"), { domain_l }),
sm::make_counter("exception_received", std::bind(&domain::count_all, this, &stats::exception_received),
sm::description("Total number of exceptional responses received"), { domain_l }).set_skip_when_empty(),
sm::make_counter("timeout", std::bind(&domain::count_all, this, &stats::timeout),
sm::description("Total number of timeout responses"), { domain_l }).set_skip_when_empty(),
sm::make_gauge("pending", std::bind(&domain::count_all_fn, this, &client::outgoing_queue_length),
sm::description("Number of queued outbound messages"), { domain_l }),
sm::make_gauge("wait_reply", std::bind(&domain::count_all_fn, this, &client::incoming_queue_length),
sm::description("Number of replies waiting for"), { domain_l }),
});
}
};
thread_local std::unordered_map<sstring, client::metrics::domain> client::metrics::domain::all;
client::metrics::domain& client::metrics::domain::find_or_create(sstring name) {
auto i = all.try_emplace(name, name);
return i.first->second;
}
client::metrics::metrics(const client& c)
: _c(c)
, _domain(domain::find_or_create(_c._options.metrics_domain))
{
_domain.list.push_back(*this);
}
client::metrics::~metrics() {
_domain.dead.replied += _c._stats.replied;
_domain.dead.exception_received += _c._stats.exception_received;
_domain.dead.sent_messages += _c._stats.sent_messages;
_domain.dead.timeout += _c._stats.timeout;
}
client::client(const logger& l, void* s, client_options ops, socket socket, const socket_address& addr, const socket_address& local)
: rpc::connection(l, s), _socket(std::move(socket)), _server_addr(addr), _local_addr(local), _options(ops), _metrics(*this)
{
_socket.set_reuseaddr(ops.reuseaddr);
// Run client in the background.
// Communicate result via _stopped.
// The caller has to call client::stop() to synchronize.
(void)_socket.connect(addr, local).then([this, ops = std::move(ops)] (connected_socket fd) {
fd.set_nodelay(ops.tcp_nodelay);
if (ops.keepalive) {
fd.set_keepalive(true);
fd.set_keepalive_parameters(ops.keepalive.value());
}
set_socket(std::move(fd));
feature_map features;
if (_options.compressor_factory) {
features[protocol_features::COMPRESS] = _options.compressor_factory->supported();
}
if (_options.send_timeout_data) {
features[protocol_features::TIMEOUT] = "";
}
if (_options.stream_parent) {
features[protocol_features::STREAM_PARENT] = serialize_connection_id(_options.stream_parent);
}
if (!_options.isolation_cookie.empty()) {
features[protocol_features::ISOLATION] = _options.isolation_cookie;
}
return negotiate_protocol(std::move(features)).then([this] {
_propagate_timeout = !is_stream();
set_negotiated();
return do_until([this] { return _read_buf.eof() || _error; }, [this] () mutable {
if (is_stream()) {
return handle_stream_frame();
}
return read_response_frame_compressed(_read_buf).then([this] (std::tuple<int64_t, std::optional<rcv_buf>> msg_id_and_data) {
auto& msg_id = std::get<0>(msg_id_and_data);
auto& data = std::get<1>(msg_id_and_data);
auto it = _outstanding.find(std::abs(msg_id));
if (!data) {
_error = true;
} else if (it != _outstanding.end()) {
auto handler = std::move(it->second);
_outstanding.erase(it);
(*handler)(*this, msg_id, std::move(data.value()));
} else if (msg_id < 0) {
try {
std::rethrow_exception(unmarshal_exception(data.value()));
} catch(const unknown_verb_error& ex) {
// If this is an unknown verb exception with an unknown id, ignore it.
// This can happen if an unknown verb was used by a no_wait client.
get_logger()(peer_address(), format("unknown verb exception {:d} ignored", ex.type));
} catch(...) {
// We've got an error response but the handler is no longer waiting; it could have timed out.
log_exception(*this, log_level::info, "ignoring error response", std::current_exception());
}
} else {
// We got a reply for a message id not in _outstanding.
// This can happen if the message id has already timed out.
get_logger()(peer_address(), log_level::debug, "got a reply for an expired message id");
}
});
});
});
}).then_wrapped([this] (future<> f) {
std::exception_ptr ep;
if (f.failed()) {
ep = f.get_exception();
if (_connected) {
if (is_stream()) {
log_exception(*this, log_level::error, "client stream connection dropped", ep);
} else {
log_exception(*this, log_level::error, "client connection dropped", ep);
}
} else {
if (is_stream()) {
log_exception(*this, log_level::debug, "stream fail to connect", ep);
} else {
log_exception(*this, log_level::debug, "fail to connect", ep);
}
}
}
_error = true;
_stream_queue.abort(std::make_exception_ptr(stream_closed()));
return stop_send_loop(ep).then_wrapped([this] (future<> f) {
f.ignore_ready_future();
_outstanding.clear();
if (is_stream()) {
deregister_this_stream();
} else {
abort_all_streams();
}
}).finally([this]{
_stopped.set_value();
});
});
enqueue_zero_frame();
}
client::client(const logger& l, void* s, const socket_address& addr, const socket_address& local)
: client(l, s, client_options{}, make_socket(), addr, local)
{}
client::client(const logger& l, void* s, client_options options, const socket_address& addr, const socket_address& local)
: client(l, s, options, make_socket(), addr, local)
{}
client::client(const logger& l, void* s, socket socket, const socket_address& addr, const socket_address& local)
: client(l, s, client_options{}, std::move(socket), addr, local)
{}
future<feature_map>
server::connection::negotiate(feature_map requested) {
feature_map ret;
future<> f = make_ready_future<>();