Option to revert to sequential read (Chia-Network#296)
* Option to revert to sequential read

* Reverse disable boolean flow

* Fix test

* debug ci runner

* debugging runner

* Add swap space to runner

* remove prints

* increase swap size
emlowe authored Jul 21, 2021
1 parent 09b09c1 commit 640713a
Showing 6 changed files with 68 additions and 17 deletions.
8 changes: 8 additions & 0 deletions .github/workflows/build-test-cplusplus.yml
@@ -12,6 +12,7 @@ jobs:
os: [ubuntu-20.04, windows-latest]

steps:

- name: Cancel previous runs on the same branch
if: ${{ github.ref != 'refs/heads/main' }}
uses: styfle/[email protected]
@@ -35,11 +36,16 @@ jobs:
run: |
sudo apt update
sudo apt-get install valgrind -y
sudo fallocate -l 16G /swapfile
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
mkdir build
cd build
cmake ../
cmake --build . -- -j 6
ctest -j 6 --output-on-failure
swapon -s
valgrind --leak-check=full --show-leak-kinds=all --errors-for-leak-kinds=all ctest -j 6 --output-on-failure
- name: cmake, RunTests with address- and undefined sanitizer on Ubuntu
@@ -49,6 +55,7 @@ jobs:
cd build-asan
cmake -DCMAKE_BUILD_TYPE=ASAN ../
cmake --build . -- -j 6
swapon -s
./RunTests
- name: cmake, RunTests with thread sanitizer on Ubuntu
@@ -58,4 +65,5 @@ jobs:
cd build-tsan
cmake -DCMAKE_BUILD_TYPE=TSAN ../
cmake --build . -- -j 6
swapon -s
./RunTests
6 changes: 3 additions & 3 deletions python-bindings/chiapos.cpp
@@ -119,19 +119,19 @@ PYBIND11_MODULE(chiapos, m)
delete[] quality_buf;
return ret;
})
.def("get_full_proof", [](DiskProver &dp, const py::bytes &challenge, uint32_t index) {
.def("get_full_proof", [](DiskProver &dp, const py::bytes &challenge, uint32_t index, bool parallel_read) {
std::string challenge_str(challenge);
const uint8_t *challenge_ptr = reinterpret_cast<const uint8_t *>(challenge_str.data());
py::gil_scoped_release release;
LargeBits proof = dp.GetFullProof(challenge_ptr, index);
LargeBits proof = dp.GetFullProof(challenge_ptr, index, parallel_read);
py::gil_scoped_acquire acquire;
uint8_t *proof_buf = new uint8_t[Util::ByteAlign(64 * dp.GetSize()) / 8];
proof.ToBytes(proof_buf);
py::bytes ret = py::bytes(
reinterpret_cast<char *>(proof_buf), Util::ByteAlign(64 * dp.GetSize()) / 8);
delete[] proof_buf;
return ret;
});
},py::arg("challenge"), py::arg("index"), py::arg("parallel_read") = true);

py::class_<Verifier>(m, "Verifier")
.def(py::init<>())
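In the get_full_proof binding above, the trailing py::arg list is what keeps the change backward compatible: parallel_read defaults to true, so existing two-argument calls behave exactly as before, while parallel_read=False opts into the sequential path. A minimal standalone pybind11 sketch of the same defaulting pattern (the module and function names here are illustrative, not part of chiapos):

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Toy module: the keyword argument defaults to true, so callers that omit it
// keep the old behaviour, and passing parallel_read=False selects the
// alternative path.
PYBIND11_MODULE(example, m)
{
    m.def(
        "read_mode",
        [](bool parallel_read) { return parallel_read ? "parallel" : "sequential"; },
        py::arg("parallel_read") = true);
}

From Python, example.read_mode() returns "parallel" and example.read_mode(parallel_read=False) returns "sequential", mirroring how get_full_proof gains the new option without breaking old call sites.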
7 changes: 5 additions & 2 deletions src/cli.cpp
@@ -83,6 +83,7 @@ int main(int argc, char *argv[]) try {
string id = "022fb42c08c12de3a6af053880199806532e79515f94e83461612101f9412f9e";
bool nobitfield = false;
bool show_progress = false;
bool parallel_read = true;
uint32_t buffmegabytes = 0;

options.allow_unrecognised_options().add_options()(
@@ -102,6 +103,8 @@
cxxopts::value<uint32_t>(buffmegabytes))(
"p, progress", "Display progress percentage during plotting",
cxxopts::value<bool>(show_progress))(
"parallel_read", "Set to false to use sequential reads",
cxxopts::value<bool>(parallel_read)->default_value("true"))(
"help", "Print help");

auto result = options.parse(argc, argv);
@@ -177,7 +180,7 @@
for (uint32_t i = 0; i < qualities.size(); i++) {
k = prover.GetSize();
uint8_t *proof_data = new uint8_t[8 * k];
LargeBits proof = prover.GetFullProof(challenge_bytes, i);
LargeBits proof = prover.GetFullProof(challenge_bytes, i, parallel_read);
proof.ToBytes(proof_data);
cout << "Proof: 0x" << Util::HexStr(proof_data, k * 8) << endl;
delete[] proof_data;
@@ -259,7 +262,7 @@
vector<LargeBits> qualities = prover.GetQualitiesForChallenge(hash.data());

for (uint32_t i = 0; i < qualities.size(); i++) {
LargeBits proof = prover.GetFullProof(hash.data(), i);
LargeBits proof = prover.GetFullProof(hash.data(), i, parallel_read);
uint8_t *proof_data = new uint8_t[proof.GetSize() / 8];
proof.ToBytes(proof_data);
cout << "i: " << num << std::endl;
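With the flag wired through both CLI code paths above, a caller can fetch the same proof with and without parallel reads and check that the bytes agree, much as the updated Python test below does. A rough C++ sketch, assuming prover_disk.hpp is on the include path, plot.dat is a valid plot, and using a placeholder challenge (normally a sha256 digest):

#include <cstdint>
#include <iostream>
#include <vector>

#include "prover_disk.hpp"  // assumed include path for DiskProver and LargeBits

int main()
{
    DiskProver prover("plot.dat");  // assumed plot filename
    uint8_t challenge[32] = {};     // placeholder; normally a sha256 digest

    std::vector<LargeBits> qualities = prover.GetQualitiesForChallenge(challenge);
    uint8_t k = prover.GetSize();

    for (uint32_t i = 0; i < qualities.size(); i++) {
        // Default argument: parallel reads, each recursive read opening its own handle.
        LargeBits parallel_proof = prover.GetFullProof(challenge, i);
        // Explicit opt-out: one shared file handle, reads issued in order.
        LargeBits sequential_proof = prover.GetFullProof(challenge, i, false);

        std::vector<uint8_t> buf_parallel(8 * k), buf_sequential(8 * k);
        parallel_proof.ToBytes(buf_parallel.data());
        sequential_proof.ToBytes(buf_sequential.data());
        std::cout << "proof " << i << " identical in both modes: "
                  << (buf_parallel == buf_sequential ? "yes" : "no") << std::endl;
    }
    return 0;
}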
40 changes: 30 additions & 10 deletions src/prover_disk.hpp
@@ -201,7 +201,7 @@ class DiskProver {
// Given a challenge, and an index, returns a proof of space. This assumes GetQualities was
// called, and there are actually proofs present. The index represents which proof to fetch,
// if there are multiple.
LargeBits GetFullProof(const uint8_t* challenge, uint32_t index)
LargeBits GetFullProof(const uint8_t* challenge, uint32_t index, bool parallel_read = true)
{
LargeBits full_proof;

@@ -219,7 +219,12 @@
}

// Gets the 64 leaf x values, concatenated together into a k*64 bit string.
std::vector<Bits> xs = GetInputs(p7_entries[index], 6);
std::vector<Bits> xs;
if (parallel_read) {
xs = GetInputs(p7_entries[index], 6);
} else {
xs = GetInputs(p7_entries[index], 6, &disk_file); // Passing in a disk_file disables the parallel reads
}

// Sorts them according to proof ordering, where
// f1(x0) m= f1(x1), f2(x0, x1) m= f2(x2, x3), etc. On disk, they are not stored in
@@ -635,11 +640,18 @@
// all of the leaves (x values). For example, for depth=5, it fetches the position-th
// entry in table 5, reading the two back pointers from the line point, and then
// recursively calling GetInputs for table 4.
std::vector<Bits> GetInputs(uint64_t position, uint8_t depth)
std::vector<Bits> GetInputs(uint64_t position, uint8_t depth, std::ifstream* disk_file = nullptr)
{
// Create individual file handles to allow parallel processing
std::ifstream disk_file(filename, std::ios::in | std::ios::binary);
uint128_t line_point = ReadLinePoint(disk_file, depth, position);
uint128_t line_point;

if (!disk_file) {
// No disk file passed in, so we assume here we are doing parallel reads
// Create individual file handles to allow parallel processing
std::ifstream disk_file_parallel(filename, std::ios::in | std::ios::binary);
line_point = ReadLinePoint(disk_file_parallel, depth, position);
} else {
line_point = ReadLinePoint(*disk_file, depth, position);
}
std::pair<uint64_t, uint64_t> xy = Encoding::LinePointToSquare(line_point);

if (depth == 1) {
@@ -649,14 +661,22 @@
ret.emplace_back(xy.first, k); // x
return ret;
} else {
auto left_fut=std::async(std::launch::async, &DiskProver::GetInputs,this, (uint64_t)xy.second, (uint8_t)(depth - 1));
auto right_fut=std::async(std::launch::async, &DiskProver::GetInputs,this, (uint64_t)xy.first, (uint8_t)(depth - 1));
std::vector<Bits> left = left_fut.get(); // y
std::vector<Bits> right = right_fut.get(); // x
std::vector<Bits> left, right;
if (!disk_file) {
// no disk_file, so we do parallel reads here
auto left_fut=std::async(std::launch::async, &DiskProver::GetInputs,this, (uint64_t)xy.second, (uint8_t)(depth - 1), nullptr);
auto right_fut=std::async(std::launch::async, &DiskProver::GetInputs,this, (uint64_t)xy.first, (uint8_t)(depth - 1), nullptr);
left = left_fut.get(); // y
right = right_fut.get(); // x
} else {
left = GetInputs(xy.second, depth - 1, disk_file); // y
right = GetInputs(xy.first, depth - 1, disk_file); // x
}
left.insert(left.end(), right.begin(), right.end());
return left;
}
}

};

#endif // SRC_CPP_PROVER_DISK_HPP_
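The heart of the change is the branch inside GetInputs: when parallel_read is true, each recursive call opens its own std::ifstream and the two sub-trees are fetched through std::async, while the sequential path threads a single file handle through the whole recursion. The following self-contained sketch shows the same pattern on a toy flat-file binary tree; it is an illustration of the technique, not chiapos code:

#include <cstdint>
#include <fstream>
#include <future>
#include <string>
#include <vector>

// Toy recursive reader: node `position` of a complete binary tree is one byte
// stored at that offset in a flat file. Every level reads its node's byte
// (standing in for ReadLinePoint) and the call returns the leaf bytes of its
// subtree in left-to-right order.
std::vector<uint8_t> ReadLeaves(
    const std::string& filename,
    uint64_t position,
    uint8_t depth,
    std::ifstream* shared_file = nullptr)
{
    uint8_t value = 0;
    if (shared_file == nullptr) {
        // Parallel mode: every call owns a handle, so sibling subtrees can be
        // read concurrently without fighting over one seek position.
        std::ifstream own_file(filename, std::ios::in | std::ios::binary);
        own_file.seekg(position);
        own_file.read(reinterpret_cast<char*>(&value), 1);
    } else {
        // Sequential mode: one handle is reused for the whole recursion.
        shared_file->seekg(position);
        shared_file->read(reinterpret_cast<char*>(&value), 1);
    }

    if (depth == 0) {
        return {value};
    }

    const uint64_t left_pos = 2 * position + 1;   // children in a flat
    const uint64_t right_pos = 2 * position + 2;  // binary-tree layout

    std::vector<uint8_t> left, right;
    if (shared_file == nullptr) {
        // Fan out: the two subtrees are read by separate tasks, each of which
        // again opens its own file handle.
        auto left_fut = std::async(std::launch::async, [&] {
            return ReadLeaves(filename, left_pos, uint8_t(depth - 1));
        });
        auto right_fut = std::async(std::launch::async, [&] {
            return ReadLeaves(filename, right_pos, uint8_t(depth - 1));
        });
        left = left_fut.get();
        right = right_fut.get();
    } else {
        left = ReadLeaves(filename, left_pos, depth - 1, shared_file);
        right = ReadLeaves(filename, right_pos, depth - 1, shared_file);
    }
    left.insert(left.end(), right.begin(), right.end());
    return left;
}

With depth 6, as in GetFullProof, the parallel path can fan out to dozens of short-lived threads and open file handles at once, which is why the plot file is reopened per call; passing a shared std::ifstream instead gives the strictly ordered, single-descriptor behaviour that parallel_read=false now selects.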
1 change: 1 addition & 0 deletions tests/test.cpp
@@ -509,6 +509,7 @@ void TestProofOfSpace(
picosha2::hash256(hash_input.begin(), hash_input.end(), hash.begin(), hash.end());
vector<LargeBits> qualities = prover.GetQualitiesForChallenge(hash.data());
Verifier verifier = Verifier();

for (uint32_t index = 0; index < qualities.size(); index++) {
LargeBits proof = prover.GetFullProof(hash.data(), index);
proof.ToBytes(proof_data);
23 changes: 21 additions & 2 deletions tests/test_python_bindings.py
@@ -64,6 +64,7 @@ def test_k_21(self):
pr = DiskProver(str(Path("myplot.dat")))

total_proofs: int = 0
total_proofs2: int = 0
iterations: int = 5000

v = Verifier()
@@ -79,11 +80,25 @@
)
assert computed_quality == quality
total_proofs += 1
for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
proof = pr.get_full_proof(challenge, index, parallel_read=False)
assert len(proof) == 8 * pr.get_size()
computed_quality = v.validate_proof(
plot_seed, pr.get_size(), challenge, proof
)
assert computed_quality == quality
total_proofs2 += 1

print(
f"total proofs {total_proofs} out of {iterations}\
{total_proofs / iterations}"
)
print(
f"total proofs (sequential reads) {total_proofs2} out of {iterations}\
{total_proofs2 / iterations}"
)

assert total_proofs2 == total_proofs
assert total_proofs > 4000
assert total_proofs < 6000
pr = None
@@ -125,7 +140,9 @@ def test_faulty_plot_doesnt_crash(self):
all_data = bytearray(f.read())
f.close()
assert len(all_data) > 20000000
all_data_bad = all_data[:20000000] + bytearray(token_bytes(10000)) + all_data[20100000:]
all_data_bad = (
all_data[:20000000] + bytearray(token_bytes(10000)) + all_data[20100000:]
)
f_bad = open("myplotbad.dat", "wb")
f_bad.write(all_data_bad)
f_bad.close()
@@ -141,7 +158,9 @@
print(i)
challenge = sha256(i.to_bytes(4, "big")).digest()
try:
for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
for index, quality in enumerate(
pr.get_qualities_for_challenge(challenge)
):
proof = pr.get_full_proof(challenge, index)
computed_quality = v.validate_proof(
plot_id, pr.get_size(), challenge, proof
