// Copyright 2010-2014 RethinkDB, all rights reserved
#ifndef RDB_PROTOCOL_REAL_TABLE_HPP_
#define RDB_PROTOCOL_REAL_TABLE_HPP_

#include <map>
#include <set>
#include <string>
#include <vector>

#include "rdb_protocol/configured_limits.hpp"
#include "rdb_protocol/context.hpp"
#include "rdb_protocol/protocol.hpp"

namespace ql {
class datum_range_t;
namespace changefeed {
class client_t;
}
}

class table_meta_client_t;

/* `real_table_t` is a concrete subclass of `base_table_t` that routes its queries across
the network via the clustering logic to a B-tree. The administration logic is responsible
for constructing and returning them from `reql_cluster_interface_t::table_find()`. */

namespace ql {
class reader_t;
}
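
/* Example usage (an illustrative sketch, not code from this repository; `table_uuid`,
   `access`, `cfeed_client`, `meta_client`, `env`, `pval`, and `read_mode` are
   hypothetical values that would normally be supplied by the clustering and
   administration layers):

       real_table_t table(table_uuid, access, "id", cfeed_client, meta_client);
       ql::datum_t row = table.read_row(env, pval, read_mode);

   The constructor does not wait for the table to be ready; callers that need
   readiness must wait for that separately. */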

class real_table_t final : public base_table_t {
public:
    /* This doesn't automatically wait for readiness. */
    real_table_t(
            namespace_id_t _uuid,
            namespace_interface_access_t _namespace_access,
            const std::string &_pkey,
            ql::changefeed::client_t *_changefeed_client,
            table_meta_client_t *table_meta_client) :
        uuid(_uuid),
        namespace_access(_namespace_access),
        pkey(_pkey),
        changefeed_client(_changefeed_client),
        m_table_meta_client(table_meta_client) { }

    namespace_id_t get_id() const;
    const std::string &get_pkey() const;

    ql::datum_t read_row(ql::env_t *env, ql::datum_t pval, read_mode_t read_mode);
    counted_t<ql::datum_stream_t> read_all(
        ql::env_t *env,
        const std::string &sindex,
        ql::backtrace_id_t bt,
        const std::string &table_name,  // The table's own name, for display purposes.
        const ql::datumspec_t &datumspec,
        sorting_t sorting,
        read_mode_t read_mode);
    counted_t<ql::datum_stream_t> read_changes(
        ql::env_t *env,
        const ql::changefeed::streamspec_t &ss,
        ql::backtrace_id_t bt);
    counted_t<ql::datum_stream_t> read_intersecting(
        ql::env_t *env,
        const std::string &sindex,
        ql::backtrace_id_t bt,
        const std::string &table_name,
        read_mode_t read_mode,
        const ql::datum_t &query_geometry);
    ql::datum_t read_nearest(
        ql::env_t *env,
        const std::string &sindex,
        const std::string &table_name,
        read_mode_t read_mode,
        lon_lat_point_t center,
        double max_dist,
        uint64_t max_results,
        const ellipsoid_spec_t &geo_system,
        dist_unit_t dist_unit,
        const ql::configured_limits_t &limits);
    ql::datum_t write_batched_replace(
        ql::env_t *env,
        const std::vector<ql::datum_t> &keys,
        const counted_t<const ql::func_t> &func,
        return_changes_t _return_changes,
        durability_requirement_t durability,
        ignore_write_hook_t ignore_write_hook);
    ql::datum_t write_batched_insert(
        ql::env_t *env,
        std::vector<ql::datum_t> &&inserts,
        std::vector<bool> &&pkey_is_autogenerated,
        conflict_behavior_t conflict_behavior,
        optional<counted_t<const ql::func_t> > conflict_func,
        return_changes_t return_changes,
        durability_requirement_t durability,
        ignore_write_hook_t ignore_write_hook);
    bool write_sync_depending_on_durability(ql::env_t *env,
                                            durability_requirement_t durability);

    scoped_ptr_t<ql::reader_t> read_all_with_sindexes(
        ql::env_t *env,
        const std::string &sindex,
        ql::backtrace_id_t bt,
        const std::string &table_name,
        const ql::datumspec_t &datumspec,
        sorting_t sorting,
        read_mode_t read_mode) final;
    /* These are not part of the `base_table_t` interface. They wrap the `read()`
    and `write()` methods of the underlying `namespace_interface_t` to add profiling
    information. Specifically, they:
        * Set the explain field in the read_t/write_t object so that the shards know
          whether or not to do profiling
        * Construct a splitter_t
        * Call the corresponding method on the `namespace_if`
        * Call `splitter_t::give_splits` with the event logs from the shards
    These are public because some of the stuff in `datum_stream.hpp` needs to be
    able to access them. */
    void read_with_profile(ql::env_t *env, const read_t &, read_response_t *response);
    void write_with_profile(ql::env_t *env, write_t *, write_response_t *response);
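
    /* Illustrative call pattern (a sketch only, not code from this repository;
       `env`, `read`, and `response` are hypothetical locals such as a stream
       implementation might hold):

           read_t read = ...;  // built by the caller; carries the profiling flag
           read_response_t response;
           table->read_with_profile(env, read, &response);

       Per the comment above, the shards' event logs are folded into the query's
       profile as part of the call. */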

private:
    optional<counted_t<const ql::func_t> > get_write_hook(
        ql::env_t *env,
        ignore_write_hook_t ignore_write_hook);

    namespace_id_t uuid;
    namespace_interface_access_t namespace_access;
    std::string pkey;
    ql::changefeed::client_t *changefeed_client;
    table_meta_client_t *m_table_meta_client;
};

#endif /* RDB_PROTOCOL_REAL_TABLE_HPP_ */