forked from facebook/hermes
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathOSCompatWindows.cpp
387 lines (328 loc) · 11.4 KB
/
OSCompatWindows.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the LICENSE
* file in the root directory of this source tree.
*/
#ifdef _WINDOWS
#include "hermes/Support/ErrorHandling.h"
#include "hermes/Support/OSCompat.h"
#include <cassert>
// Include windows.h first because other includes from windows API need it.
// The blank line after the include is necessary to avoid lint error.
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX // do not define min/max macros
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
namespace hermes {
namespace oscompat {
#ifndef NDEBUG
// Test-only override of the OS page size; 0 means no override is active.
static size_t testPgSz = 0;
// Make page_size() report \p pageSz instead of the real OS page size
// (debug builds only; used by tests).
void set_test_page_size(size_t pageSz) {
  testPgSz = pageSz;
}
// Clear the override so page_size() reports the real OS page size again.
void reset_test_page_size() {
  testPgSz = 0;
}
#endif
static inline size_t page_size_real() {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return system_info.dwPageSize;
}
/// Return the page size in effect: the test override when one is set
/// (debug builds), otherwise the real OS page size.
size_t page_size() {
#ifndef NDEBUG
  if (testPgSz != 0)
    return testPgSz;
#endif
  return page_size_real();
}
#ifndef NDEBUG
// Sentinel meaning "no test limit installed".
static constexpr size_t unsetVMAllocLimit = std::numeric_limits<size_t>::max();
// Remaining bytes that vm_allocate/vm_allocate_aligned may hand out before
// reporting OOMError::TestVMLimitReached (debug builds only; used by tests).
static size_t totalVMAllocLimit = unsetVMAllocLimit;
// Install a cap of \p totSz bytes on total virtual memory allocation.
void set_test_vm_allocate_limit(size_t totSz) {
  totalVMAllocLimit = totSz;
}
// Remove the cap installed by set_test_vm_allocate_limit.
void unset_test_vm_allocate_limit() {
  totalVMAllocLimit = unsetVMAllocLimit;
}
#endif // !NDEBUG
/// Round \p p up to the nearest multiple of \p alignment.
static char *alignAlloc(void *p, size_t alignment) {
  const uintptr_t raw = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<char *>(llvm::alignTo(raw, alignment));
}
/// Ask the OS for virtual memory via VirtualAlloc.
/// \param addr desired base address, or nullptr to let the OS choose.
/// \param sz number of bytes requested.
/// \param flags VirtualAlloc allocation type (MEM_RESERVE and/or MEM_COMMIT).
/// \return the base of the allocation on success; otherwise the Windows
///   error code wrapped in a std::error_code.
static llvm::ErrorOr<void *>
vm_allocate_impl(void *addr, size_t sz, DWORD flags) {
  if (void *mem = VirtualAlloc(addr, sz, flags, PAGE_READWRITE)) {
    return mem;
  }
  // Windows does not use POSIX error codes, but defines its own set.
  // Pair GetLastError with system_category so the value is interpreted
  // correctly.
  return std::error_code(GetLastError(), std::system_category());
}
/// Allocate \p sz bytes at an OS-chosen address, both reserving and
/// committing the pages.
static llvm::ErrorOr<void *> vm_allocate_impl(size_t sz) {
  // TODO(T40416012) introduce explicit "commit" in OSCompat abstraction of
  // virtual memory
  //
  // On POSIX a reserved page becomes committed implicitly on first access,
  // and Hermes' virtual memory abstraction has no explicit "commit" call.
  // On Windows, however, touching a page that is reserved but not committed
  // raises an access violation. So although Windows would let us reserve
  // without committing, we must do both up front here.
  return vm_allocate_impl(nullptr, sz, MEM_RESERVE | MEM_COMMIT);
}
static std::error_code vm_free_impl(void *p, size_t sz) {
BOOL ret = VirtualFree(p, 0, MEM_RELEASE);
return ret ? std::error_code{}
: std::error_code(GetLastError(), std::system_category());
}
/// Reserve and commit \p sz bytes (a multiple of the page size) of virtual
/// memory.
llvm::ErrorOr<void *> vm_allocate(size_t sz) {
#ifndef NDEBUG
  assert(sz % page_size() == 0);
  // When a test simulates a page size larger than the real one, delegate to
  // the aligned allocator so the result honors the simulated page size.
  if (testPgSz != 0 && testPgSz > static_cast<size_t>(page_size_real())) {
    return vm_allocate_aligned(sz, testPgSz);
  }
  // Enforce the test-only allocation cap, if one is installed.
  if (LLVM_UNLIKELY(sz > totalVMAllocLimit)) {
    return make_error_code(OOMError::TestVMLimitReached);
  }
  if (LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit)) {
    totalVMAllocLimit -= sz;
  }
#endif // !NDEBUG
  return vm_allocate_impl(sz);
}
/// Allocate \p sz bytes of committed virtual memory whose base address is a
/// multiple of \p alignment. Both arguments must be non-zero multiples of
/// the page size.
/// \return the aligned base address, or a Windows error code on failure.
llvm::ErrorOr<void *> vm_allocate_aligned(size_t sz, size_t alignment) {
  /// A value of 3 means vm_allocate_aligned will:
  /// 1. Opportunistic: allocate and see if it happens to be aligned
  /// 2. Regular: Try aligned allocation 3 times (see below for details)
  /// 3. Fallback: Allocate more than needed, and waste the excess
  constexpr int aligned_allocation_attempts = 3;
#ifndef NDEBUG
  assert(sz > 0 && sz % page_size() == 0);
  assert(alignment > 0 && alignment % page_size() == 0);
  if (LLVM_UNLIKELY(sz > totalVMAllocLimit)) {
    return make_error_code(OOMError::TestVMLimitReached);
  } else if (LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit)) {
    totalVMAllocLimit -= sz;
  }
#endif // !NDEBUG
  // Opportunistically allocate without alignment constraint,
  // and see if the memory happens to be aligned.
  // While this may be unlikely on the first allocation request,
  // subsequent allocation requests have a good chance.
  llvm::ErrorOr<void *> result = vm_allocate_impl(sz);
  if (!result) {
    // Don't attempt to do anything further if the allocation failed.
    return result;
  }
  void *addr = *result;
  if (LLVM_LIKELY(addr == alignAlloc(addr, alignment))) {
    return addr;
  }
  // Free the opportunistic allocation.
  std::error_code err = vm_free_impl(addr, sz);
  if (err) {
    hermes_fatal(
        (llvm::Twine("Failed to free memory region in vm_allocate_aligned: ") +
         convert_error_to_message(err))
            .str());
  }
  // Size of the over-sized reservations below: large enough that some
  // page-aligned subregion satisfying \p alignment can hold \p sz bytes.
  const size_t excessSz = sz + alignment - page_size_real();
  for (int attempts = 0; attempts < aligned_allocation_attempts; attempts++) {
    // Allocate a larger section to ensure that it contains
    // a subsection that satisfies the request.
    result = vm_allocate_impl(nullptr, excessSz, MEM_RESERVE);
    if (!result) {
      return result;
    }
    addr = *result;
    // Find the desired subsection
    char *aligned = alignAlloc(addr, alignment);
    // Free the larger allocation (including the desired subsection).
    // Fix: pass the actual reservation size, not sz. (VirtualFree with
    // MEM_RELEASE ignores the size, so this is for accuracy only.)
    err = vm_free_impl(addr, excessSz);
    if (err) {
      hermes_fatal(
          (llvm::Twine(
               "Failed to free memory region in vm_allocate_aligned: ") +
           convert_error_to_message(err))
              .str());
    }
    // Request allocation at the desired subsection. Another thread may have
    // grabbed the address between the free above and this call, which is
    // why this is wrapped in a retry loop.
    result = vm_allocate_impl(aligned, sz, MEM_RESERVE | MEM_COMMIT);
    if (result) {
      assert(result.get() == aligned);
      return result.get();
    }
  }
  // Similar to the regular mechanism, but simply return the desired
  // subsection (instead of free and re-allocate). This has two downsides:
  // 1. Wasted virtual address space.
  // 2. vm_free_aligned is now required to call VirtualQuery, which has
  // a non-trivial cost.
  result = vm_allocate_impl(nullptr, excessSz, MEM_RESERVE);
  if (!result) {
    return result;
  }
  addr = *result;
  addr = alignAlloc(addr, alignment);
  // Commit the sz-byte aligned subsection of the reservation.
  // Fix: this previously committed only `alignment` bytes, which
  // under-committed whenever sz > alignment; touching the uncommitted tail
  // would have raised an access violation.
  result = vm_allocate_impl(addr, sz, MEM_COMMIT);
  if (!result) {
    hermes_fatal(
        (llvm::Twine(
             "Failed to commit subsection of reserved memory in vm_allocate_aligned: ") +
         convert_error_to_message(result.getError()))
            .str());
  }
  return result;
}
/// Release a region obtained from vm_allocate. Fatal error on failure.
void vm_free(void *p, size_t sz) {
#ifndef NDEBUG
  // A simulated page size larger than the real one means the memory came
  // from vm_allocate_aligned (see vm_allocate); free it accordingly.
  if (testPgSz != 0 && testPgSz > static_cast<size_t>(page_size_real())) {
    return vm_free_aligned(p, sz);
  }
#endif // !NDEBUG
  if (std::error_code err = vm_free_impl(p, sz)) {
    hermes_fatal((llvm::Twine("Failed to free virtual memory region: ") +
                  convert_error_to_message(err))
                     .str());
  }
#ifndef NDEBUG
  // Return the bytes to the test-only allocation budget, if one is active.
  if (p && LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit)) {
    totalVMAllocLimit += sz;
  }
#endif
}
/// Release a region obtained from vm_allocate_aligned. \p p may point into
/// the interior of the reservation (fallback path of vm_allocate_aligned),
/// so the true base is recovered with VirtualQuery.
void vm_free_aligned(void *p, size_t sz) {
  // VirtualQuery is necessary because p may not be the base location
  // of the allocation (due to possible fallback in vm_allocate_aligned).
  MEMORY_BASIC_INFORMATION mbi;
  SIZE_T query_ret = VirtualQuery(p, &mbi, sizeof(MEMORY_BASIC_INFORMATION));
  assert(query_ret != 0 && "Failed to invoke VirtualQuery in vm_free_aligned");
  // Fix: silence the unused-variable warning in release (NDEBUG) builds,
  // matching the existing treatment of `ret` below.
  (void)query_ret;
  BOOL ret = VirtualFree(mbi.AllocationBase, 0, MEM_RELEASE);
  assert(ret && "Failed to invoke VirtualFree in vm_free_aligned.");
  (void)ret;
#ifndef NDEBUG
  // Return the bytes to the test-only allocation budget, if one is active.
  if (LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit) && p) {
    totalVMAllocLimit += sz;
  }
#endif
}
void vm_unused(void *p, size_t sz) {
#ifndef NDEBUG
const size_t PS = page_size();
assert(
reinterpret_cast<intptr_t>(p) % PS == 0 &&
"Precondition: pointer is page-aligned.");
assert(sz % PS == 0 && "Precondition: size is page-aligned.");
#endif
// TODO(T40416012) introduce explicit "commit" in OSCompat abstraction of
// virtual memory
// Do nothing.
// In POSIX, a mem page implicitly transitions from "reserved" state to
// "committed" state on access. However, on Windows, accessing
// "reserved" but not "committed" page results in an access violation.
// There is no explicit call to transition to "committed" state
// in Hermes' virtual memory abstraction.
// As a result, even though Windows has an API to transition a page from
// "committed" state back to "reserved" state, we can not invoke it here.
}
/// Hint that the page-aligned region [p, p+sz) will be needed soon.
/// Currently a no-op on Windows.
void vm_prefetch(void *p, size_t sz) {
  assert(
      reinterpret_cast<intptr_t>(p) % page_size() == 0 &&
      "Precondition: pointer is page-aligned.");
  (void)sz;
  // TODO(T40415796) provide actual "prefetch" implementation
}
/// Attach a debugging name to a memory region. Not supported on Windows;
/// all arguments are ignored.
void vm_name(void *p, size_t sz, const char *name) {
  (void)name;
  (void)sz;
  (void)p;
}
bool vm_protect(void *p, size_t sz, ProtectMode) {
DWORD oldProtect;
BOOL err = VirtualProtect(p, sz, PAGE_READWRITE, &oldProtect);
return err != 0;
}
/// madvise-style hint. Not implemented on Windows; always reports failure.
bool vm_madvise(void *p, size_t sz, MAdvice advice) {
  (void)p;
  (void)sz;
  (void)advice;
  return false;
}
int pages_in_ram(const void *p, size_t sz, llvm::SmallVectorImpl<int> *runs) {
// Not yet supported.
return -1;
}
/// Return the peak resident set size (peak working set) of this process in
/// bytes, or 0 if the query fails.
uint64_t peak_rss() {
  PROCESS_MEMORY_COUNTERS pmc;
  auto ret = GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc));
  // Fix: GetProcessMemoryInfo returns NON-zero on success and zero on
  // failure; the previous check (ret != 0) was inverted and returned 0
  // exactly when the call succeeded.
  if (ret == 0) {
    // failed
    return 0;
  }
  return pmc.PeakWorkingSetSize;
}
/// Report voluntary/involuntary context-switch counts. Not yet supported
/// on Windows: both outputs are set to -1 and false is returned.
bool num_context_switches(long &voluntary, long &involuntary) {
  voluntary = -1;
  involuntary = -1;
  return false;
}
/// Return the OS identifier of the calling thread.
uint64_t thread_id() {
  return static_cast<uint64_t>(GetCurrentThreadId());
}
static std::chrono::microseconds::rep fromFileTimeToMicros(
const FILETIME &fileTime) {
ULARGE_INTEGER uli;
uli.LowPart = fileTime.dwLowDateTime;
uli.HighPart = fileTime.dwHighDateTime;
// convert from 100-nanosecond to microsecond
return uli.QuadPart / 10;
}
/// Return the total CPU time (kernel + user) consumed by the calling
/// thread, or zero if the underlying query fails.
std::chrono::microseconds thread_cpu_time() {
  FILETIME creationTime;
  FILETIME exitTime;
  FILETIME kernelTime;
  FILETIME userTime;
  // Fix: check the result of GetThreadTimes. It returns zero on failure,
  // in which case the FILETIME structs are uninitialized and the previous
  // code would have summed garbage.
  if (!GetThreadTimes(
          GetCurrentThread(),
          &creationTime,
          &exitTime,
          &kernelTime,
          &userTime)) {
    return std::chrono::microseconds{0};
  }
  return std::chrono::microseconds(
      fromFileTimeToMicros(kernelTime) + fromFileTimeToMicros(userTime));
}
/// Report per-thread page-fault counts. Not supported on Windows: the
/// outputs are left untouched and false is returned.
/// Windows offers GetProcessMemoryInfo, but it is process-wide (and counts
/// soft faults) — there is no thread-level counterpart. Per-thread figures
/// would require ETW (Event Tracing for Windows), which is overkill here.
bool thread_page_fault_count(int64_t *outMinorFaults, int64_t *outMajorFaults) {
  (void)outMinorFaults;
  (void)outMajorFaults;
  return false;
}
/// Return the calling thread's name. Always empty on Windows:
/// Get/SetThreadDescription requires Windows 10 1607+, and before that
/// thread names only exist while a Visual Studio debugger is attached.
std::string thread_name() {
  return {};
}
/// Set environment variable \p name to \p value.
/// \pre value is non-empty — setting a variable to the empty string would
///   require significant hacks on Windows (an empty value deletes it).
/// \return true on success.
bool set_env(const char *name, const char *value) {
  assert(*value != '\0' && "value cannot be empty string");
  const int rc = _putenv_s(name, value);
  return rc == 0;
}
/// Remove \p name from the environment (on Windows, setting an env var to
/// the empty string deletes it). \return true on success.
bool unset_env(const char *name) {
  const int rc = _putenv_s(name, "");
  return rc == 0;
}
/// Windows has no notion of alternate signal stacks, so construction and
/// destruction are both no-ops.
SigAltStackDeleter::SigAltStackDeleter() = default;
SigAltStackDeleter::~SigAltStackDeleter() = default;
} // namespace oscompat
} // namespace hermes
#endif // _WINDOWS