/*
* Copyright 2017 - 2021 Justas Masiulis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) || defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif
#include <cstdint>
#include <cstddef>
#include <utility>
#include <type_traits>
#define xorstr(str) ::jm::xor_string([]() { return str; }, std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{}, std::make_index_sequence<::jm::detail::_buffer_size<sizeof(str)>()>{})
#define xorstr_(str) xorstr(str).crypt_get()
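// Usage sketch (illustrative; `xorstr` and `xorstr_` above are the intended API):
//
//   auto s = xorstr("top secret");   // stored xor-encrypted in the binary
//   std::puts(s.crypt_get());        // decrypts in place, yields a char*
//   s.crypt();                       // xor is an involution, so this re-encrypts
//
//   std::puts(xorstr_("one-shot"));  // shorthand for xorstr(...).crypt_get()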
#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif
#if defined(__clang__) || defined(__GNUC__)
#define JM_XORSTR_LOAD_FROM_REG(x) ::jm::detail::load_from_reg(x)
#else
#define JM_XORSTR_LOAD_FROM_REG(x) (x)
#endif
namespace jm {
namespace detail {
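// Number of 64-bit slots backing a literal of `Size` bytes (sizeof(str),
// terminator included), rounded up to whole 16-byte chunks so the SIMD
// loads and stores below always touch fully aligned blocks.
// e.g. sizeof("hello") == 6 -> one 16-byte chunk -> 2 uint64 slots.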
template<std::size_t Size>
XORSTR_FORCEINLINE constexpr std::size_t _buffer_size()
{
return ((Size / 16) + (Size % 16 != 0)) * 2;
}
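// Compile-time key schedule: an FNV-1a style hash over the __TIME__ string,
// seeded per 8-byte block, so keys differ between builds and between blocks
// of the same string.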
template<std::uint32_t Seed>
XORSTR_FORCEINLINE constexpr std::uint32_t key4() noexcept
{
std::uint32_t value = Seed;
for(char c : __TIME__)
value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
return value;
}
template<std::size_t S>
XORSTR_FORCEINLINE constexpr std::uint64_t key8()
{
constexpr auto first_part = key4<2166136261 + S>();
constexpr auto second_part = key4<first_part>();
return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
}
// loads up to 8 characters of the string into a uint64 and xors it with the key
template<std::size_t N, class CharT>
XORSTR_FORCEINLINE constexpr std::uint64_t
load_xored_str8(std::uint64_t key, std::size_t idx, const CharT* str) noexcept
{
using cast_type = typename std::make_unsigned<CharT>::type;
constexpr auto value_size = sizeof(CharT);
constexpr auto idx_offset = 8 / value_size;
std::uint64_t value = key;
for(std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
value ^=
(std::uint64_t{ static_cast<cast_type>(str[i + idx * idx_offset]) }
<< ((i % idx_offset) * 8 * value_size));
return value;
}
// forces the compiler to route the value through a register instead of stuffing the constant into .rdata
XORSTR_FORCEINLINE std::uint64_t load_from_reg(std::uint64_t value) noexcept
{
#if defined(__clang__) || defined(__GNUC__)
asm("" : "=r"(value) : "0"(value) :);
#endif
return value;
}
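// Carrying each xored block as a non-type template parameter forces it to be
// evaluated at compile time, so the plaintext never appears in the binary.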
template<std::uint64_t V>
struct uint64_v {
constexpr static std::uint64_t value = V;
};
} // namespace detail
template<class CharT, std::size_t Size, class Keys, class Indices>
class xor_string;
template<class CharT, std::size_t Size, std::uint64_t... Keys, std::size_t... Indices>
class xor_string<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>, std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
constexpr static inline std::uint64_t alignment = 16;
#endif
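// Encrypted blocks, aligned for the widest SIMD store used in crypt():
// 32 bytes when the AVX2 path applies, 16 otherwise.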
alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];
public:
using value_type = CharT;
using size_type = std::size_t;
using pointer = CharT*;
using const_pointer = const CharT*;
template<class L>
XORSTR_FORCEINLINE xor_string(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) noexcept
: _storage{ JM_XORSTR_LOAD_FROM_REG(detail::uint64_v<detail::load_xored_str8<Size>(Keys, Indices, l())>::value)... }
{}
XORSTR_FORCEINLINE constexpr size_type size() const noexcept
{
return Size - 1;
}
XORSTR_FORCEINLINE void crypt() noexcept
{
// everything is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
alignas(alignment)
std::uint64_t arr[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
std::uint64_t* keys =
(std::uint64_t*)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
alignas(alignment) std::uint64_t keys[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
#endif
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
51)), ...);
#else // GCC, MSVC
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
reinterpret_cast<__m256i*>(_storage) + Indices,
_mm256_xor_si256(
_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);
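// The 256-bit path above covers whole 32-byte chunks; a buffer that is only
// a multiple of 16 has its trailing 16-byte chunk xored here.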
if constexpr(sizeof(_storage) % 32 != 0)
_mm_store_si128(
reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
reinterpret_cast<__m128i*>(_storage) + Indices,
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif
}
XORSTR_FORCEINLINE const_pointer get() const noexcept
{
return reinterpret_cast<const_pointer>(_storage);
}
XORSTR_FORCEINLINE pointer get() noexcept
{
return reinterpret_cast<pointer>(_storage);
}
XORSTR_FORCEINLINE pointer crypt_get() noexcept
{
// crypt() is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
alignas(alignment)
std::uint64_t arr[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
std::uint64_t* keys =
(std::uint64_t*)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
alignas(alignment) std::uint64_t keys[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
#endif
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
51)), ...);
#else // GCC, MSVC
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
reinterpret_cast<__m256i*>(_storage) + Indices,
_mm256_xor_si256(
_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);
if constexpr(sizeof(_storage) % 32 != 0)
_mm_store_si128(
reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
reinterpret_cast<__m128i*>(_storage) + Indices,
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif
return (pointer)(_storage);
}
};
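// Deduction guide: deduces CharT from the lambda's literal and materializes
// one 64-bit key per 8-byte block of the backing buffer.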
template<class L, std::size_t Size, std::size_t... Indices>
xor_string(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) -> xor_string<
std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>,
Size,
std::integer_sequence<std::uint64_t, detail::key8<Indices>()...>,
std::index_sequence<Indices...>>;
} // namespace jm
#endif // include guard