ObjectPool.h
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2020 Telegram Systems LLP
*/
#pragma once
#include "td/utils/common.h"
#include "td/utils/logging.h"
#include <atomic>
#include <memory>
#include <utility>
namespace td {
// This is a draft object pool implementation.
//
// Compared with std::shared_ptr:
// + WeakPtr is much faster: just a pointer copy, no barriers, no atomics.
// - We can't destroy the object, because we don't know whether some weak pointer still points to it.
//
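// Illustrative usage sketch (MyData is a hypothetical user type); it assumes DataT is
// default-constructible and provides clear(), as Storage below requires:
//
//   ObjectPool<MyData> pool;
//   ObjectPool<MyData>::OwnerPtr owner = pool.create(/* constructor args */);
//   ObjectPool<MyData>::WeakPtr weak = owner.get_weak();  // cheap copy of (generation, storage)
//   CHECK(weak.is_alive());
//   pool.release(std::move(owner));   // destroys the data and bumps the generation
//   CHECK(!weak.is_alive());          // the weak pointer now reports the slot as dead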
template <class DataT>
class ObjectPool {
struct Storage;
public:
class WeakPtr {
public:
WeakPtr() : generation_(-1), storage_(nullptr) {
}
WeakPtr(int32 generation, Storage *storage) : generation_(generation), storage_(storage) {
}
DataT &operator*() const {
return storage_->data;
}
DataT *operator->() const {
return &**this;
}
// Usage pattern: 1. Read the object. 2. Check whether the read was valid via is_alive().
//
// This is not the usual way to use acquire/release.
// Instead of publishing the object via some flag, we do the opposite:
// we publish the new generation via the destruction of the data.
// In the usual case, if we see the flag, we are allowed to use the object.
// In our case, if we have used an object that is already invalid, the generation will mismatch.
bool is_alive() const {
if (!storage_) {
return false;
}
std::atomic_thread_fence(std::memory_order_acquire);
return generation_ == storage_->generation.load(std::memory_order_relaxed);
}
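// Note: the acquire fence above pairs with the release fence in Storage::destroy_data().
// If the preceding read of the data observed any of the writes made during destruction,
// the relaxed load of the generation is guaranteed to see the incremented value,
// so is_alive() reports the object as dead.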
// Used for ActorId
bool is_alive_unsafe() const {
if (!storage_) {
return false;
}
return generation_ == storage_->generation.load(std::memory_order_relaxed);
}
bool empty() const {
return storage_ == nullptr;
}
void clear() {
generation_ = -1;
storage_ = nullptr;
}
int32 generation() {
return generation_;
}
private:
int32 generation_;
Storage *storage_;
};
class OwnerPtr {
public:
OwnerPtr() = default;
OwnerPtr(const OwnerPtr &) = delete;
OwnerPtr &operator=(const OwnerPtr &) = delete;
OwnerPtr(OwnerPtr &&other) : storage_(other.storage_), parent_(other.parent_) {
other.storage_ = nullptr;
other.parent_ = nullptr;
}
OwnerPtr &operator=(OwnerPtr &&other) {
if (this != &other) {
storage_ = other.storage_;
parent_ = other.parent_;
other.storage_ = nullptr;
other.parent_ = nullptr;
}
return *this;
}
~OwnerPtr() {
reset();
}
DataT *get() {
return &storage_->data;
}
DataT &operator*() {
return *get();
}
DataT *operator->() {
return get();
}
const DataT *get() const {
return &storage_->data;
}
const DataT &operator*() const {
return *get();
}
const DataT *operator->() const {
return get();
}
WeakPtr get_weak() {
return WeakPtr(storage_->generation.load(std::memory_order_relaxed), storage_);
}
int32 generation() {
return storage_->generation.load(std::memory_order_relaxed);
}
Storage *release() {
auto result = storage_;
storage_ = nullptr;
return result;
}
bool empty() const {
return storage_ == nullptr;
}
void reset() {
if (storage_ != nullptr) {
// Handles the corner case where the data owns an OwnerPtr to itself.
auto tmp = storage_;
storage_ = nullptr;
parent_->release(OwnerPtr(tmp, parent_));
}
}
private:
friend class ObjectPool;
OwnerPtr(Storage *storage, ObjectPool<DataT> *parent) : storage_(storage), parent_(parent) {
}
Storage *storage_ = nullptr;
ObjectPool<DataT> *parent_ = nullptr;
};
template <class... ArgsT>
OwnerPtr create(ArgsT &&... args) {
Storage *storage = get_storage();
storage->init_data(std::forward<ArgsT>(args)...);
return OwnerPtr(storage, this);
}
OwnerPtr create_empty() {
Storage *storage = get_storage();
return OwnerPtr(storage, this);
}
void set_check_empty(bool flag) {
check_empty_flag_ = flag;
}
void release(OwnerPtr &&owner_ptr) {
Storage *storage = owner_ptr.release();
storage->destroy_data();
release_storage(storage);
}
ObjectPool() = default;
ObjectPool(const ObjectPool &) = delete;
ObjectPool &operator=(const ObjectPool &) = delete;
ObjectPool(ObjectPool &&other) = delete;
ObjectPool &operator=(ObjectPool &&other) = delete;
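// The destructor frees every Storage that has been returned to the free list and then
// checks that storage_count_ reached zero, i.e. every OwnerPtr was released before the
// pool itself is destroyed.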
~ObjectPool() {
while (head_.load()) {
auto to_delete = head_.load();
head_ = to_delete->next;
delete to_delete;
storage_count_--;
}
LOG_CHECK(storage_count_.load() == 0) << storage_count_.load();
}
private:
struct Storage {
// union {
DataT data;
//};
Storage *next = nullptr;
std::atomic<int32> generation{1};
template <class... ArgsT>
void init_data(ArgsT &&... args) {
// new (&data) DataT(std::forward<ArgsT>(args)...);
data = DataT(std::forward<ArgsT>(args)...);
}
void destroy_data() {
generation.fetch_add(1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_release);
data.clear();
}
};
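// Note on Storage: data is reset by assignment in init_data() and by clear() in
// destroy_data() rather than by placement new / explicit destruction (see the
// commented-out union and placement-new above), so DataT must be default-constructible,
// assignable, and provide clear().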
std::atomic<int32> storage_count_{0};
std::atomic<Storage *> head_{static_cast<Storage *>(nullptr)};
bool check_empty_flag_ = false;
// TODO(perf): allocate Storages in chunks? Anyway we won't be able to release them.
// TODO(perf): memory order
// TODO(perf): use another, non-lock-free list for releases on the same thread
// Only one thread calls get_storage(), so there is no ABA problem.
Storage *get_storage() {
if (head_.load() == nullptr) {
storage_count_++;
return new Storage();
}
Storage *res;
while (true) {
res = head_.load();
auto *next = res->next;
if (head_.compare_exchange_weak(res, next)) {
break;
}
}
return res;
}
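// The loop above is a lock-free stack pop: read the head, then CAS it to head->next,
// retrying if a concurrent release_storage() pushed a new node in between.
// It stays ABA-free only because a single thread pops (see the note above).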
// release can be called from other thread
void release_storage(Storage *storage) {
while (true) {
auto *save_head = head_.load();
storage->next = save_head;
if (head_.compare_exchange_weak(save_head, storage)) {
break;
}
}
}
};
} // namespace td