forked from swiftlang/swift
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathClusteredBitVector.cpp
218 lines (191 loc) · 7.16 KB
/
ClusteredBitVector.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
//===--- ClusteredBitVector.cpp - Out-of-line code for the bit vector -----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements support code for ClusteredBitVector.
//
//===----------------------------------------------------------------------===//
#include "swift/Basic/ClusteredBitVector.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"
using namespace swift;
/// Build a ClusteredBitVector whose bits match the given APInt,
/// with bit 0 of the APInt becoming bit 0 of the vector.
ClusteredBitVector ClusteredBitVector::fromAPInt(const llvm::APInt &bits) {
  // Appending one bit at a time is not a very efficient algorithm,
  // but it is straightforwardly correct.
  ClusteredBitVector result;
  const unsigned width = bits.getBitWidth();
  for (unsigned idx = 0; idx < width; ++idx) {
    if (bits[idx])
      result.appendSetBits(1);
    else
      result.appendClearBits(1);
  }
  return result;
}
/// Convert this vector to an APInt of the same width.
llvm::APInt ClusteredBitVector::asAPInt() const {
  // An inline-and-all-clear vector is simply size() zero bits.
  if (isInlineAndAllClear())
    return llvm::APInt(size(), 0);

  // Otherwise, hand the chunk storage to APInt directly.
  // This assumes that the chunk size is the same as APInt's.
  // TODO: it'd be nice to be able to do this without copying.
  return llvm::APInt(size(), getChunks());
}
/// Grow the vector's storage to \p newCapacityInChunks chunks, moving it
/// out of line if necessary and preserving its current contents.
void ClusteredBitVector::reallocate(size_t newCapacityInChunks) {
  // If we already have out-of-line storage, the padding invariants
  // will still apply, and we just need to copy the old data into
  // the new allocation.
  if (hasOutOfLineData()) {
    auto oldData = getOutOfLineChunksPtr();
    allocateAndCopyFrom(oldData, newCapacityInChunks, getLengthInChunks());
    // NOTE(review): the allocation evidently begins one chunk before the
    // chunks pointer (presumably a header slot) — the -1 undoes that
    // offset; confirm against allocate()'s layout.
    delete[] (oldData - 1);
    return;
  }

  // Otherwise, we might need to establish the invariants.  If we
  // were in inline-and-all-clear mode, the vector might logically
  // be much longer than a single chunk, but all-zero.
  HasOutOfLineData = true;
  // Save the inline chunk value before allocating: allocate() presumably
  // repoints Data at the new storage, so it must be read first.
  auto oldDataValue = Data;
  auto newData = allocate(newCapacityInChunks);

  // All of these cases initialize 'length' chunks in newData.
  switch (auto length = getLengthInChunks()) {
  case 0:
    // Empty vector: nothing to copy.
    break;
  case 1:
    // Exactly one inline chunk: copy it into the new storage.
    newData[0] = oldDataValue;
    break;
  default:
    // Logically longer than one chunk while inline: must have been in
    // inline-and-all-clear mode, so the contents are all zero.
    assert(oldDataValue == 0 && "not previously in inline-and-all-clear?");
    memset(newData, 0, length * sizeof(ChunkType));
    break;
  }
}
/// Append \p numBits bits to the vector, pulling their values from
/// \p generator.  Sufficient capacity must already have been reserved.
///
/// The generator is called with the number of bits wanted for the next
/// piece of output (always in (0, ChunkSizeInBits]) and must return those
/// bits in the low bits of its result, with all higher bits clear.
void ClusteredBitVector::appendReserved(size_t numBits,
         llvm::function_ref<ChunkType(size_t numBitsWanted)> generator) {
  assert(LengthInBits + numBits <= getCapacityInBits());
  assert(numBits > 0);

  // Wrap the generator so that, in asserts builds, we check that its
  // result actually fits in the requested number of bits.
  auto getMoreBits =
      [&](size_t numBitsWanted) -> ChunkType {
    auto result = generator(numBitsWanted);
    // An n-bit value must be strictly less than 1 << n.  (The previous
    // check used <=, which incorrectly accepted exactly 1 << n.  The
    // full-chunk case is excluded because shifting by the full chunk
    // width would be undefined behavior.)
    assert((numBitsWanted == ChunkSizeInBits ||
            result < (ChunkType(1) << numBitsWanted)) &&
           "generator returned out-of-range value!");
    return result;
  };

  // Check whether the current end of the vector is a clean multiple
  // of the chunk size.
  auto offset = LengthInBits % ChunkSizeInBits;
  ChunkType *nextChunk = &getChunksPtr()[LengthInBits / ChunkSizeInBits];

  // Now we can go ahead and add in the right number of extra bits.
  LengthInBits += numBits;

  // If not, we need to combine the generator result with that last chunk.
  if (offset) {
    auto claimedBits = std::min(numBits, size_t(ChunkSizeInBits - offset));
    // The extra bits in data[chunkIndex] are guaranteed to be zero.
    *nextChunk++ |= (getMoreBits(claimedBits) << offset);
    numBits -= claimedBits;
    if (numBits == 0) return;
  }

  // For the rest, just generate chunks one at a time.
  do {
    auto claimedBits = std::min(numBits, size_t(ChunkSizeInBits));
    *nextChunk++ = getMoreBits(claimedBits);
    numBits -= claimedBits;
  } while (numBits);
}
/// Append \p numBits copies of a constant bit — set when \p addOnes is
/// true, clear otherwise.  Capacity must already have been reserved.
void ClusteredBitVector::appendConstantBitsReserved(size_t numBits,
                                                    bool addOnes) {
  assert(LengthInBits + numBits <= getCapacityInBits());
  assert(numBits > 0);

  // An all-ones (or all-zeros) chunk; each generator call just shifts it
  // down to the requested width.
  ChunkType fill = (addOnes ? ~ChunkType(0) : ChunkType(0));
  appendReserved(numBits, [=](size_t wanted) -> ChunkType {
    return (fill >> (ChunkSizeInBits - wanted));
  });
}
/// Append \p numBits bits to the vector, reading them from the chunk
/// array \p nextChunk.  Capacity must already have been reserved.
void ClusteredBitVector::appendReserved(size_t numBits,
                                        const ChunkType *nextChunk) {
  // This is easy if we're not currently at an offset.
  // (Note that this special case generator relies on the exact
  // implementation of the main appendReserved routine.)
  auto offset = LengthInBits % ChunkSizeInBits;
  if (!offset) {
    appendReserved(numBits, [&](size_t numBitsWanted) -> ChunkType {
      return *nextChunk++;
    });
    return;
  }

  // But if we are, we need to be constantly mixing values.
  // prevChunk holds bits already read from the source but not yet
  // delivered; bitsRemaining counts how many of its low bits are valid.
  ChunkType prevChunk = 0;
  size_t bitsRemaining = 0;
  appendReserved(numBits, [&](size_t numBitsWanted) -> ChunkType {
    // Mask selecting the low numBitsWanted bits.  The full-chunk case is
    // special-cased because shifting by the full width would be UB.
    auto resultMask = (numBitsWanted == ChunkSizeInBits
                         ? ~ChunkType(0)
                         : ((ChunkType(1) << numBitsWanted) - 1));

    // If we can resolve the desired bits out of the current chunk,
    // all the better.
    if (numBitsWanted <= bitsRemaining) {
      assert(numBitsWanted != ChunkSizeInBits);
      auto result = prevChunk & resultMask;
      bitsRemaining -= numBitsWanted;
      prevChunk >>= numBitsWanted;
      return result;
    }

    // Otherwise, pull in the next source chunk and splice its low bits
    // onto the leftover bits:
    //
    // |-- bitsRemaining --|-------- ChunkSizeInBits --------|
    // |     prevChunk     |            nextChunk            |
    // |------ numBitsWanted ------|----- bitsRemaining' ----|
    //                             |        prevChunk'       |
    auto newChunk = *nextChunk++;
    auto result = (prevChunk | (newChunk << bitsRemaining)) & resultMask;
    prevChunk = newChunk >> (numBitsWanted - bitsRemaining);
    bitsRemaining = ChunkSizeInBits + bitsRemaining - numBitsWanted;
    return result;
  });
}
/// Compare two same-size, non-empty vectors, at least one of which has
/// out-of-line storage.
bool ClusteredBitVector::equalsSlowCase(const ClusteredBitVector &lhs,
                                        const ClusteredBitVector &rhs) {
  assert(lhs.size() == rhs.size());
  assert(!lhs.empty() && !rhs.empty());
  assert(lhs.hasOutOfLineData() || rhs.hasOutOfLineData());

  // When both sides are out of line, compare the chunk arrays directly.
  if (lhs.hasOutOfLineData() && rhs.hasOutOfLineData()) {
    auto lhsChunks = lhs.getOutOfLineChunks();
    auto rhsChunks = rhs.getOutOfLineChunks();
    assert(lhsChunks.size() == rhsChunks.size());
    return lhsChunks == rhsChunks;
  }

  // Otherwise exactly one side is still inline.  An inline vector either
  // fits in one chunk or is in inline-and-all-clear mode (Data == 0), so
  // every out-of-line chunk must equal the inline Data value.
  const auto &inlineSide = lhs.hasOutOfLineData() ? rhs : lhs;
  const auto &outOfLineSide = lhs.hasOutOfLineData() ? lhs : rhs;
  assert(inlineSide.Data == 0 || inlineSide.getLengthInChunks() == 1);
  for (auto chunk : outOfLineSide.getOutOfLineChunks())
    if (chunk != inlineSide.Data)
      return false;
  return true;
}
/// Print the vector to stderr; intended for use from a debugger.
void ClusteredBitVector::dump() const {
  auto &stream = llvm::errs();
  print(stream);
}
/// Pretty-print the vector: a space after every 8 bits and a newline
/// after every 64 bits, i.e. 8 clusters of 8 bits per row.
void ClusteredBitVector::print(llvm::raw_ostream &out) const {
  size_t e = size();
  // Guard the empty case: the loop below reads bit 0 before testing the
  // bound, which would be out of range for an empty vector.
  if (e == 0)
    return;
  for (size_t i = 0; ; ) {
    out << ((*this)[i++] ? '1' : '0');
    if (i == e) {
      return;
    } else if (i % 64 == 0) {
      // Previously written as (i & 64) == 0 — a bit test, not a
      // multiple-of-64 test — which broke the line after nearly every
      // bit instead of once per 64.
      out << '\n';
    } else if (i % 8 == 0) {
      // Likewise fixed from (i & 8) == 0.
      out << ' ';
    }
  }
}