
Commit

limits read access into Packet data to Packet.meta.size (solana-labs#25484)

Bytes past Packet.meta.size are not valid to read from.

The commit makes the buffer field private and instead provides two
methods:
* Packet::data() which returns an immutable reference to the underlying
  buffer up to Packet.meta.size. The rest of the buffer is not valid to
  read from.
* Packet::buffer_mut() which returns a mutable reference to the entire
  underlying buffer to write into. The caller is responsible for updating
  Packet.meta.size after writing to the buffer.
behzadnouri authored May 25, 2022
1 parent f10c80b commit 8806845
Showing 20 changed files with 112 additions and 121 deletions.
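
For orientation while reading the hunks below, here is a minimal sketch of what the two accessors described in the commit message might look like. It is not the upstream implementation: the constant value, the Meta fields shown, and the fixed-size array are assumptions standing in for the real solana_sdk types.

// Sketch only: names other than Packet, Meta, data(), buffer_mut(), and
// meta.size are assumptions, not the actual solana_sdk definitions.
const PACKET_DATA_SIZE: usize = 1232;

#[derive(Default)]
pub struct Meta {
    pub size: usize,
    // socket address, flags, etc. omitted in this sketch
}

pub struct Packet {
    buffer: [u8; PACKET_DATA_SIZE], // now private: no direct reads past meta.size
    pub meta: Meta,
}

impl Packet {
    /// Immutable view limited to the bytes that have actually been written.
    pub fn data(&self) -> &[u8] {
        &self.buffer[..self.meta.size]
    }

    /// Mutable view of the whole buffer for writing; the caller must update
    /// meta.size afterwards so that data() keeps returning only valid bytes.
    pub fn buffer_mut(&mut self) -> &mut [u8] {
        &mut self.buffer[..]
    }
}

With that model in mind, most of the diff reduces to replacing manual slicing such as &p.data[..p.meta.size] with p.data(), and replacing writes through p.data with writes through p.buffer_mut() followed by an explicit meta.size update.
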
2 changes: 1 addition & 1 deletion bench-streamer/src/main.rs
@@ -37,7 +37,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
for p in packet_batch.iter() {
let a = p.meta.socket_addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
send.send_to(p.data(), &a).unwrap();
num += 1;
}
assert_eq!(num, 10);
2 changes: 1 addition & 1 deletion core/src/banking_stage.rs
@@ -508,7 +508,7 @@ impl BankingStage {
.iter()
.filter_map(|p| {
if !p.meta.forwarded() && data_budget.take(p.meta.size) {
Some(p.data[..p.meta.size].to_vec())
Some(p.data().to_vec())
} else {
None
}
3 changes: 1 addition & 2 deletions core/src/packet_hasher.rs
@@ -26,8 +26,7 @@ impl Default for PacketHasher {

impl PacketHasher {
pub(crate) fn hash_packet(&self, packet: &Packet) -> u64 {
let size = packet.data.len().min(packet.meta.size);
self.hash_data(&packet.data[..size])
self.hash_data(packet.data())
}

pub(crate) fn hash_shred(&self, shred: &Shred) -> u64 {
9 changes: 5 additions & 4 deletions core/src/repair_response.rs
@@ -28,13 +28,14 @@ pub fn repair_response_packet_from_bytes(
nonce: Nonce,
) -> Option<Packet> {
let mut packet = Packet::default();
packet.meta.size = bytes.len() + SIZE_OF_NONCE;
if packet.meta.size > packet.data.len() {
let size = bytes.len() + SIZE_OF_NONCE;
if size > packet.buffer_mut().len() {
return None;
}
packet.meta.size = size;
packet.meta.set_socket_addr(dest);
packet.data[..bytes.len()].copy_from_slice(&bytes);
let mut wr = io::Cursor::new(&mut packet.data[bytes.len()..]);
packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes);
let mut wr = io::Cursor::new(&mut packet.buffer_mut()[bytes.len()..]);
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
Some(packet)
}
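
The repair_response.rs hunk above shows the intended write pattern under the new API: size-check against buffer_mut(), write, then set meta.size. The same contract applies when receiving from a socket (the gossip test further down receives straight into buffer_mut() the same way). A hedged sketch, reusing the assumed Packet type from the earlier sketch; recv_packet is a hypothetical helper, not part of this commit:

use std::{io, net::UdpSocket};

// Hypothetical helper illustrating the buffer_mut()/meta.size contract.
fn recv_packet(socket: &UdpSocket, packet: &mut Packet) -> io::Result<usize> {
    // Write into the full buffer; the kernel may fill only a prefix of it.
    let (nbytes, _addr) = socket.recv_from(packet.buffer_mut())?;
    // Record how many bytes are now valid, so that subsequent reads through
    // packet.data() stay within the initialized region.
    packet.meta.size = nbytes;
    Ok(nbytes)
}

Readers of the packet then only ever touch packet.data(), which is what the serve_repair.rs, shred.rs, and sigverify_shreds.rs changes below reduce to.
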
6 changes: 3 additions & 3 deletions core/src/serve_repair.rs
@@ -814,7 +814,7 @@ mod tests {
.into_iter()
.filter_map(|p| {
assert_eq!(repair_response::nonce(p).unwrap(), nonce);
Shred::new_from_serialized_shred(p.data.to_vec()).ok()
Shred::new_from_serialized_shred(p.data().to_vec()).ok()
})
.collect();
assert!(!rv.is_empty());
@@ -898,7 +898,7 @@
.into_iter()
.filter_map(|p| {
assert_eq!(repair_response::nonce(p).unwrap(), nonce);
Shred::new_from_serialized_shred(p.data.to_vec()).ok()
Shred::new_from_serialized_shred(p.data().to_vec()).ok()
})
.collect();
assert_eq!(rv[0].index(), 1);
@@ -1347,7 +1347,7 @@

fn verify_responses<'a>(request: &ShredRepairType, packets: impl Iterator<Item = &'a Packet>) {
for packet in packets {
let shred_payload = packet.data.to_vec();
let shred_payload = packet.data().to_vec();
let shred = Shred::new_from_serialized_shred(shred_payload).unwrap();
request.verify_response(&shred);
}
8 changes: 4 additions & 4 deletions core/src/sigverify_shreds.rs
@@ -120,7 +120,7 @@ pub mod tests {

let keypair = Keypair::new();
shred.sign(&keypair);
batches[0][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len();

let mut shred = Shred::new_from_data(
@@ -134,7 +134,7 @@
0xc0de,
);
shred.sign(&keypair);
batches[1][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[1][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[1][0].meta.size = shred.payload().len();

let expected: HashSet<u64> = [0xc0de_dead, 0xdead_c0de].iter().cloned().collect();
@@ -169,7 +169,7 @@
0xc0de,
);
shred.sign(&leader_keypair);
batches[0][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len();

let mut shred = Shred::new_from_data(
@@ -184,7 +184,7 @@
);
let wrong_keypair = Keypair::new();
shred.sign(&wrong_keypair);
batches[0][1].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][1].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][1].meta.size = shred.payload().len();

let num_packets = solana_perf::sigverify::count_packets_in_batches(&batches);
7 changes: 2 additions & 5 deletions core/src/unprocessed_packet_batches.rs
@@ -356,14 +356,11 @@ pub fn deserialize_packets<'a>(
/// Read the transaction message from packet data
pub fn packet_message(packet: &Packet) -> Result<&[u8], DeserializedPacketError> {
let (sig_len, sig_size) =
decode_shortu16_len(&packet.data).map_err(DeserializedPacketError::ShortVecError)?;
decode_shortu16_len(packet.data()).map_err(DeserializedPacketError::ShortVecError)?;
sig_len
.checked_mul(size_of::<Signature>())
.and_then(|v| v.checked_add(sig_size))
.map(|msg_start| {
let msg_end = packet.meta.size;
&packet.data[msg_start..msg_end]
})
.and_then(|msg_start| packet.data().get(msg_start..))
.ok_or(DeserializedPacketError::SignatureOverflowed(sig_size))
}

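
The packet_message change above also drops the explicit msg_end = packet.meta.size bound: since data() already ends at meta.size, slice::get applies the same upper bound and returns None instead of panicking when the computed message start lies past the valid bytes. A minimal sketch of that behavior; message_bytes is a hypothetical helper, not the commit's code:

// Hypothetical helper: equivalent bound to the old [msg_start..msg_end]
// slicing, but it cannot panic on a malformed packet.
fn message_bytes(packet: &Packet, msg_start: usize) -> Option<&[u8]> {
    // data() ends at meta.size, so this is bounded exactly as msg_end was;
    // an out-of-range msg_start yields None rather than an index panic.
    packet.data().get(msg_start..)
}
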
8 changes: 2 additions & 6 deletions core/src/window_service.rs
@@ -23,7 +23,7 @@ use {
solana_perf::packet::{Packet, PacketBatch},
solana_rayon_threadlimit::get_thread_count,
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey},
solana_sdk::{clock::Slot, pubkey::Pubkey},
std::{
cmp::Reverse,
collections::{HashMap, HashSet},
@@ -363,11 +363,7 @@ where
inc_new_counter_debug!("streamer-recv_window-invalid_or_unnecessary_packet", 1);
return None;
}
// shred fetch stage should be sending packets
// with sufficiently large buffers. Needed to ensure
// call to `new_from_serialized_shred` is safe.
assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
let serialized_shred = packet.data.to_vec();
let serialized_shred = packet.data().to_vec();
let shred = Shred::new_from_serialized_shred(serialized_shred).ok()?;
if !shred_filter(&shred, working_bank.clone(), last_root) {
return None;
4 changes: 2 additions & 2 deletions gossip/tests/gossip.rs
@@ -260,7 +260,7 @@ pub fn cluster_info_retransmit() {
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
&p.data[..p.meta.size],
p.data(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
Expand All @@ -270,7 +270,7 @@ pub fn cluster_info_retransmit() {
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut p.data);
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
18 changes: 9 additions & 9 deletions ledger/src/shred.rs
@@ -287,7 +287,7 @@ impl Shred {
pub fn copy_to_packet(&self, packet: &mut Packet) {
let payload = self.payload();
let size = payload.len();
packet.data[..size].copy_from_slice(&payload[..]);
packet.buffer_mut()[..size].copy_from_slice(&payload[..]);
packet.meta.size = size;
}

@@ -575,7 +575,7 @@
}
};

let shred_type = match ShredType::try_from(p.data[OFFSET_OF_SHRED_TYPE]) {
let shred_type = match ShredType::try_from(p.data()[OFFSET_OF_SHRED_TYPE]) {
Err(_) => {
stats.bad_shred_type += 1;
return None;
@@ -733,7 +733,7 @@
let shred = Shred::new_from_data(10, 0, 1000, &[1, 2, 3], ShredFlags::empty(), 0, 1, 0);
let mut packet = Packet::default();
shred.copy_to_packet(&mut packet);
let shred_res = Shred::new_from_serialized_shred(packet.data.to_vec());
let shred_res = Shred::new_from_serialized_shred(packet.data().to_vec());
assert_matches!(
shred.parent(),
Err(Error::InvalidParentOffset {
@@ -825,7 +825,7 @@
200, // version
);
shred.copy_to_packet(&mut packet);
packet.data[OFFSET_OF_SHRED_TYPE] = u8::MAX;
packet.buffer_mut()[OFFSET_OF_SHRED_TYPE] = u8::MAX;

assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
assert_eq!(1, stats.bad_shred_type);
@@ -892,13 +892,13 @@
data.iter().skip(skip).copied()
});
let mut packet = Packet::default();
packet.data[..payload.len()].copy_from_slice(&payload);
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len();
assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
assert_eq!(
shred.reference_tick(),
Shred::reference_tick_from_data(&packet.data).unwrap()
Shred::reference_tick_from_data(packet.data()).unwrap()
);
assert_eq!(Shred::get_slot_from_packet(&packet), Some(shred.slot()));
assert_eq!(
@@ -933,13 +933,13 @@
assert_matches!(shred.sanitize(), Ok(()));
let payload = bs58_decode(PAYLOAD);
let mut packet = Packet::default();
packet.data[..payload.len()].copy_from_slice(&payload);
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len();
assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
assert_eq!(
shred.reference_tick(),
Shred::reference_tick_from_data(&packet.data).unwrap()
Shred::reference_tick_from_data(packet.data()).unwrap()
);
assert_eq!(Shred::get_slot_from_packet(&packet), Some(shred.slot()));
assert_eq!(
@@ -981,7 +981,7 @@
parity_shard.iter().skip(skip).copied()
});
let mut packet = Packet::default();
packet.data[..payload.len()].copy_from_slice(&payload);
packet.buffer_mut()[..payload.len()].copy_from_slice(&payload);
packet.meta.size = payload.len();
assert_eq!(shred.bytes_to_store(), payload);
assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap());
26 changes: 9 additions & 17 deletions ledger/src/sigverify_shreds.rs
@@ -62,12 +62,9 @@ pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>)
};
trace!("slot {}", slot);
let pubkey = slot_leaders.get(&slot)?;
if packet.meta.size < sig_end {
return Some(0);
}
let signature = Signature::new(&packet.data[sig_start..sig_end]);
let signature = Signature::new(packet.data().get(sig_start..sig_end)?);
trace!("signature {}", signature);
if !signature.verify(pubkey, &packet.data[msg_start..msg_end]) {
if !signature.verify(pubkey, packet.data().get(msg_start..msg_end)?) {
return Some(0);
}
Some(1)
@@ -307,14 +304,9 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) {
let sig_start = 0;
let sig_end = sig_start + size_of::<Signature>();
let msg_start = sig_end;
let msg_end = packet.meta.size;
assert!(
packet.meta.size >= msg_end,
"packet is not large enough for a signature"
);
let signature = keypair.sign_message(&packet.data[msg_start..msg_end]);
let signature = keypair.sign_message(&packet.data()[msg_start..]);
trace!("signature {:?}", signature);
packet.data[0..sig_end].copy_from_slice(signature.as_ref());
packet.buffer_mut()[..sig_end].copy_from_slice(signature.as_ref());
}

pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [PacketBatch]) {
@@ -443,7 +435,7 @@
let sig_ix = packet_ix + num_packets;
let sig_start = sig_ix * sig_size;
let sig_end = sig_start + sig_size;
packet.data[0..sig_size]
packet.buffer_mut()[..sig_size]
.copy_from_slice(&signatures_out[sig_start..sig_end]);
});
});
@@ -476,7 +468,7 @@
let keypair = Keypair::new();
shred.sign(&keypair);
trace!("signature {}", shred.signature());
packet.data[0..shred.payload().len()].copy_from_slice(shred.payload());
packet.buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
packet.meta.size = shred.payload().len();

let leader_slots = [(slot, keypair.pubkey().to_bytes())]
@@ -520,7 +512,7 @@
let keypair = Keypair::new();
shred.sign(&keypair);
batches[0].resize(1, Packet::default());
batches[0][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len();

let leader_slots = [(slot, keypair.pubkey().to_bytes())]
@@ -574,7 +566,7 @@
let keypair = Keypair::new();
shred.sign(&keypair);
batches[0].resize(1, Packet::default());
batches[0][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len();

let leader_slots = [
@@ -685,7 +677,7 @@
0xc0de,
);
batches[0].resize(1, Packet::default());
batches[0][0].data[0..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload());
batches[0][0].meta.size = shred.payload().len();

let pubkeys = [