Skip to content

Commit

Permalink
Improve allocation ranges -> addresses conversion
Browse files Browse the repository at this point in the history
This moves away from adding all allocated storage addresses for each range, which is VERY slow on large nodes spanning multiple pages. It now only requests target records for specific record numbers, and uses calculated counts instead.
  • Loading branch information
appy-one committed Oct 2, 2023
1 parent e3fdade commit 3756aab
Showing 1 changed file with 36 additions and 9 deletions.
45 changes: 36 additions & 9 deletions src/storage/binary/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2414,12 +2414,31 @@ class NodeAllocation {

/**
 * All individual record addresses covered by the current allocation,
 * expanded from the compact page/record ranges.
 * NOTE: materializes one address per allocated record; for large
 * allocations prefer `getAddresses(start, end)` to get a window only.
 */
get addresses(): StorageAddress[] {
    return this.ranges.flatMap(range =>
        Array.from(
            { length: range.length },
            (_, offset) => new StorageAddress(range.pageNr, range.recordNr + offset),
        ),
    );
}
/**
 * Gets individual record addresses from the current allocation without
 * expanding the whole allocation (unlike the `addresses` getter).
 * Ranges that lie entirely before the requested window are skipped in O(1)
 * instead of being iterated record by record.
 * @param start index of the first record to include
 * @param end index to stop at (exclusive — does not include this record)
 * @returns addresses of records `start` (inclusive) through `end` (exclusive)
 */
getAddresses(start: number, end: number) {
    const addresses = [] as StorageAddress[];
    let nr = 0; // allocation-wide index of the first record of the current range
    for (const range of this.ranges) {
        if (nr + range.length <= start) {
            // Whole range is before the requested window; skip it without
            // visiting its individual records.
            nr += range.length;
            continue;
        }
        for (let i = 0; i < range.length && nr < end; i++, nr++) {
            // Loop condition already guarantees nr < end here
            if (nr >= start) {
                const address = new StorageAddress(range.pageNr, range.recordNr + i);
                addresses.push(address);
            }
        }
        if (nr >= end) { break; }
    }
    return addresses;
}

Expand Down Expand Up @@ -3056,7 +3075,7 @@ class NodeReader {
if (this.recordInfo.hasKeyIndex) {
return createStreamFromBinaryTree();
}
else if (this.recordInfo.allocation.addresses.length === 1) {
else if (this.recordInfo.allocation.totalAddresses === 1) {
// We have all data in memory (small record)
return createStreamFromLinearData(this.recordInfo.startData, true);
}
Expand Down Expand Up @@ -3371,6 +3390,7 @@ class NodeReader {
});
}

// #allocatedAddresses = null as StorageAddress[] | null;
async _treeDataWriter(binary: number[] | Buffer, index: number) {
if (binary instanceof Array) {
binary = Buffer.from(binary);
Expand All @@ -3386,7 +3406,11 @@ class NodeReader {
nr: Math.floor((headerLength + index + length) / recordSize),
offset: (headerLength + index + length) % recordSize,
};
const writeRecords = this.recordInfo.allocation.addresses.slice(startRecord.nr, endRecord.nr + 1);
// if (!this.#allocatedAddresses) {
// this.#allocatedAddresses = this.recordInfo.allocation.addresses;
// }
// const writeRecords = this.#allocatedAddresses.slice(startRecord.nr, endRecord.nr + 1);
const writeRecords = this.recordInfo.allocation.getAddresses(startRecord.nr, endRecord.nr + 1);
const writeRanges = NodeAllocation.fromAdresses(writeRecords).ranges;
const writes = [];
let bOffset = 0;
Expand Down Expand Up @@ -3421,11 +3445,15 @@ class NodeReader {
nr: Math.floor((headerLength + index + length) / recordSize),
offset: (headerLength + index + length) % recordSize,
};
const readRecords = this.recordInfo.allocation.addresses.slice(startRecord.nr, endRecord.nr + 1);
// if (!this.#allocatedAddresses) {
// this.#allocatedAddresses = this.recordInfo.allocation.addresses;
// }
// const readRecords = this.#allocatedAddresses.slice(startRecord.nr, endRecord.nr + 1);
const readRecords = this.recordInfo.allocation.getAddresses(startRecord.nr, endRecord.nr + 1);
if (readRecords.length === 0) {
throw new Error(
`Attempt to read non-existing records of path "/${this.recordInfo.path}": ${startRecord.nr} to ${endRecord.nr + 1} ` +
`for index ${index} + ${length} bytes. Node has ${this.recordInfo.allocation.addresses.length} allocated records ` +
`for index ${index} + ${length} bytes. Node has ${this.recordInfo.allocation.totalAddresses} allocated records ` +
`in the following ranges: ` + this.recordInfo.allocation.toString()
);
}
Expand Down Expand Up @@ -3468,7 +3496,6 @@ class NodeReader {
let view = new DataView(data.buffer);
let offset = 1;
const firstRange = new StorageAddressRange(this.address.pageNr, this.address.recordNr, 1);
/** @type {StorageAddressRange[]} */
const ranges = [firstRange];
const allocation = new NodeAllocation(ranges);
let readingRecordIndex = 0;
Expand All @@ -3478,7 +3505,7 @@ class NodeReader {
if (offset + 9 + 2 >= data.length) {
// Read more data (next record)
readingRecordIndex++;
const address = allocation.addresses[readingRecordIndex];
const [address] = allocation.getAddresses(readingRecordIndex, readingRecordIndex + 1);
const fileIndex = this.storage.getRecordFileIndex(address.pageNr, address.recordNr);
const moreData = new Uint8Array(bytesPerRecord);
await this.storage.readData(fileIndex, moreData.buffer);
Expand Down Expand Up @@ -4045,7 +4072,7 @@ async function _writeNode(storage: AceBaseStorage, path: string, value: any, loc
// Create a B+tree
const fillFactor =
isArray || serialized.every(kvp => typeof kvp.key === 'string' && /^[0-9]+$/.test(kvp.key))
? BINARY_TREE_FILL_FACTOR_50
? BINARY_TREE_FILL_FACTOR_50 // TODO: Consider removing this, might be better performing now with 95% instead!
: BINARY_TREE_FILL_FACTOR_95;

const treeBuilder = new BPlusTreeBuilder(true, fillFactor);
Expand Down

0 comments on commit 3756aab

Please sign in to comment.