Halve memory usage for remote cache writes. (pantsbuild#12083)
Previously we were inadvertently double copying. We still buffer too
much in the case of large blobs, but this does help.
jsirois authored May 16, 2021
1 parent 5385d3f commit 3270555
Showing 3 changed files with 7 additions and 12 deletions.
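
The change is small: ByteStore::store_bytes now takes an owned bytes::Bytes instead of a &[u8], so it no longer needs Bytes::copy_from_slice to obtain an owned buffer for the gRPC client. A minimal standalone sketch of the difference (illustrative only, not the Pants code; the buffer here just stands in for a blob read from the local store):

use bytes::Bytes;

fn main() {
  // Stand-in for a blob just read out of the local store.
  let payload = Bytes::from(vec![0u8; 1024 * 1024]);

  // Old shape: the API took `&[u8]`, so the remote store made its own owned
  // copy before handing it to the gRPC client -- a second allocation and
  // memcpy of the whole payload.
  let copied = Bytes::copy_from_slice(&payload);

  // New shape: the API takes `Bytes` by value. Cloning (or moving) a `Bytes`
  // shares the underlying allocation via a reference count; no data is copied.
  let shared = payload.clone();

  assert_eq!(copied.len(), shared.len());
  assert_eq!(shared.as_ptr(), payload.as_ptr());
}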
src/rust/engine/fs/store/src/lib.rs (2 changes: 1 addition & 1 deletion)

@@ -557,7 +557,7 @@ impl Store {
       })
       .await?;
     match maybe_bytes {
-      Some(bytes) => remote.store_bytes(&bytes).await,
+      Some(bytes) => remote.store_bytes(bytes).await,
       None => Err(format!(
         "Failed to upload digest {:?}: Not found in local store",
         digest
src/rust/engine/fs/store/src/remote.rs (7 changes: 1 addition & 6 deletions)

@@ -88,7 +88,7 @@ impl ByteStore {
     })
   }

-  pub async fn store_bytes(&self, bytes: &[u8]) -> Result<Digest, String> {
+  pub async fn store_bytes(&self, bytes: Bytes) -> Result<Digest, String> {
     let len = bytes.len();
     let digest = Digest::of_bytes(&bytes);
     let resource_name = format!(
@@ -110,11 +110,6 @@ impl ByteStore {
       let resource_name = resource_name.clone();
       let chunk_size_bytes = store.chunk_size_bytes;

-      // NOTE(tonic): The call into the Tonic library wants the slice to last for the 'static
-      // lifetime but the slice passed into this method generally points into the shared memory
-      // of the LMDB store which is on the other side of the FFI boundary.
-      let bytes = Bytes::copy_from_slice(bytes);
-
       let stream = futures::stream::unfold((0, false), move |(offset, has_sent_any)| {
         if offset >= bytes.len() && has_sent_any {
           futures::future::ready(None)
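
The chunked upload itself is unchanged: the futures::stream::unfold loop above walks the buffer chunk_size_bytes at a time. A rough sketch of that pattern, assuming only the bytes and futures crates (the names chunks and chunk_size are illustrative, not from the repository); because Bytes::slice returns a view into the same reference-counted allocation, chunking an owned Bytes stays copy-free:

use bytes::Bytes;
use futures::StreamExt;

// Illustrative only: stream an owned `Bytes` buffer in fixed-size chunks, in
// the spirit of the unfold loop above. `Bytes::slice` hands out views into
// the same reference-counted allocation, so no chunk is copied.
fn chunks(bytes: Bytes, chunk_size: usize) -> impl futures::Stream<Item = Bytes> {
  futures::stream::unfold((0usize, false), move |(offset, has_sent_any)| {
    let bytes = bytes.clone(); // cheap: bumps a reference count
    async move {
      if offset >= bytes.len() && has_sent_any {
        None
      } else {
        let end = std::cmp::min(offset + chunk_size, bytes.len());
        // The `has_sent_any` flag ensures an empty buffer still yields one
        // (empty) chunk, mirroring the original loop.
        Some((bytes.slice(offset..end), (end, true)))
      }
    }
  })
}

fn main() {
  let data = Bytes::from_static(b"hello world");
  let collected: Vec<Bytes> =
    futures::executor::block_on(chunks(data, 4).collect());
  assert_eq!(collected.len(), 3);
}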
src/rust/engine/fs/store/src/remote_tests.rs (10 changes: 5 additions & 5 deletions)

@@ -140,7 +140,7 @@ async fn write_file_one_chunk() {

   let store = new_byte_store(&cas);
   assert_eq!(
-    store.store_bytes(&testdata.bytes()).await,
+    store.store_bytes(testdata.bytes()).await,
     Ok(testdata.digest())
   );

@@ -168,7 +168,7 @@ async fn write_file_multiple_chunks() {
   let fingerprint = big_file_fingerprint();

   assert_eq!(
-    store.store_bytes(&all_the_henries).await,
+    store.store_bytes(all_the_henries.clone()).await,
     Ok(big_file_digest())
   );

@@ -198,7 +198,7 @@ async fn write_empty_file() {

   let store = new_byte_store(&cas);
   assert_eq!(
-    store.store_bytes(&empty_file.bytes()).await,
+    store.store_bytes(empty_file.bytes()).await,
     Ok(empty_file.digest())
   );

@@ -215,7 +215,7 @@ async fn write_file_errors() {

   let store = new_byte_store(&cas);
   let error = store
-    .store_bytes(&TestData::roland().bytes())
+    .store_bytes(TestData::roland().bytes())
     .await
     .expect_err("Want error");
   assert!(

@@ -238,7 +238,7 @@ async fn write_connection_error() {
   )
   .unwrap();
   let error = store
-    .store_bytes(&TestData::roland().bytes())
+    .store_bytes(TestData::roland().bytes())
     .await
     .expect_err("Want error");
   assert!(
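
One test detail worth noting: write_file_multiple_chunks now passes all_the_henries.clone(), but cloning a Bytes shares the allocation rather than duplicating it, so the test does not reintroduce the copy this commit removes. A quick standalone check of that property (illustrative, not from the repository):

use bytes::Bytes;

fn main() {
  // A large payload standing in for `all_the_henries`.
  let original = Bytes::from(vec![42u8; 10 * 1024 * 1024]);
  let clone = original.clone();

  // Both handles point at the same underlying buffer: the clone is a
  // reference-count bump, not a second 10 MiB allocation.
  assert_eq!(original.as_ptr(), clone.as_ptr());
  assert_eq!(original.len(), clone.len());
}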
