Skip to content

Commit

Permalink
Clean up code, implement fallback mode
Browse files Browse the repository at this point in the history
  • Loading branch information
jlongster committed Aug 8, 2021
1 parent dcbd246 commit 31d076b
Show file tree
Hide file tree
Showing 11 changed files with 522 additions and 305 deletions.
33 changes: 14 additions & 19 deletions src/blocked-file.js
Original file line number Diff line number Diff line change
@@ -1,22 +1,5 @@
import * as perf from 'perf-deets';

// SQLite's five file-lock states, in escalating order. The numeric
// values match the progression described in SQLite's "File Locking And
// Concurrency" documentation (NONE < SHARED < RESERVED < PENDING <
// EXCLUSIVE), so ordered comparisons like `lockType >= RESERVED` work.
let LOCK_TYPES = {
NONE: 0,
SHARED: 1,
RESERVED: 2,
PENDING: 3,
EXCLUSIVE: 4
};

function getPageSize(bufferView) {
  // See 1.3.2 on https://www.sqlite.org/fileformat.html. The page size
  // is stored as a 2-byte big-endian integer at byte offset 16 of the
  // database header, so combine the two bytes into a single integer.
  //
  // Per the format spec, the stored value is a power of two between 512
  // and 32768 inclusive, OR the value 1, which represents a page size
  // of 65536 (65536 does not fit in two bytes).
  let value = (bufferView[16] << 8) | bufferView[17];
  return value === 1 ? 65536 : value;
}
import { getPageSize, LOCK_TYPES } from './sqlite-util';

function range(start, end, step) {
let r = [];
Expand Down Expand Up @@ -229,7 +212,9 @@ export class File {
}

write(bufferView, offset, length, position) {
// console.log('writing', this.filename, offset, length, position);
// console.log('writing', this.filename, offset, length,
// position);
perf.record('write');

if (this.meta.blockSize == null) {
// We don't have a block size yet. The first write MUST be the
Expand Down Expand Up @@ -317,13 +302,21 @@ export class File {

this.bufferChunks(allWrites);

perf.endRecording('write');

if (position + length > this.meta.size) {
this.setattr({ size: position + length });
}

return length;
}

readIfFallback() {
if (this.ops.readIfFallback) {
return this.ops.readIfFallback();
}
}

lock(lockType) {
if (this.ops.lock(lockType)) {
if (lockType >= LOCK_TYPES.RESERVED) {
Expand Down Expand Up @@ -397,7 +390,9 @@ export class File {
}
}

perf.record('writeBlocks');
this.ops.writeBlocks([...this.buffer.values()], this.meta.blockSize);
perf.endRecording('writeBlocks');
}

if (this._metaDirty) {
Expand Down
4 changes: 0 additions & 4 deletions src/blocked-fs.js
Original file line number Diff line number Diff line change
Expand Up @@ -136,10 +136,6 @@ export default class BlockedFS {
};
}

// Initialize the underlying storage backend before the filesystem is
// used. Purely a delegation — all setup work lives in the backend.
async init() {
// NOTE(review): assumes `this.backend.init()` is async (it is awaited
// here) — confirm against the backend implementations.
await this.backend.init();
}

mount() {
return this.createNode(null, '/', 16384 /* dir */ | 511 /* 0777 */, 0);
}
Expand Down
23 changes: 20 additions & 3 deletions src/examples/large-data/main.worker.js
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ async function init() {

if (typeof SharedArrayBuffer === 'undefined') {
output(
'<code>SharedArrayBuffer</code> is not available in your browser. It is required, but in the future we will provide a fallback.'
'<code>SharedArrayBuffer</code> is not available in your browser. Falling back.'
);
}

Expand Down Expand Up @@ -61,7 +61,13 @@ function closeDatabase() {
async function getDatabase() {
await init();
if (_db == null) {
_db = new SQL.Database(`/blocked/${dbName}`, { filename: true });
let path = `/blocked/${dbName}`;

let { node } = SQL.FS.open(path, 'a+');
await node.contents.readIfFallback();

_db = new SQL.Database(path, { filename: true });

// Should ALWAYS use the journal in memory mode. Doesn't make
// any sense at all to write the journal. It's way slower
_db.exec(`
Expand Down Expand Up @@ -97,8 +103,19 @@ async function populate() {
);
count = 100000;
}
// count = Math.random() * 100 + 1000;

queries.populate(await getDatabase(), output, uuid, count);
let db = await getDatabase();

if (recordProfile) {
BFS.backend.startProfile();
}

queries.populate(db, output, uuid, count);

if (recordProfile) {
BFS.backend.stopProfile();
}

let { node } = SQL.FS.lookupPath(`/blocked/${dbName}`);
let file = node.contents;
Expand Down
8 changes: 4 additions & 4 deletions src/examples/large-data/queries.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ function formatNumber(num) {
return new Intl.NumberFormat('en-US').format(num);
}

function populate(db, output, uuid, count = 10) {
function populate(db, output, uuid, count = 1000000) {
output('Clearing existing data');
db.exec(`
BEGIN TRANSACTION;
Expand Down Expand Up @@ -74,8 +74,8 @@ async function randomReads(db, output) {

let canRebind = !!stmt.reset;

for (let i = 0; i < 8; i++) {
let off = i * 10000;
for (let i = 0; i < 100; i++) {
let off = i * 300;
if (canRebind) {
stmt.bind([off]);
}
Expand All @@ -85,7 +85,7 @@ async function randomReads(db, output) {
// better-sqlite3 doesn't allow you to rebind the same
// statement. This is probably a tiny perf hit, but negligible
// for what we're measuring (it's already so much faster anyway)
stmt = db.prepare(`SELECT key FROM kv LIMIT 1000 OFFSET ${off}`);
stmt = db.prepare(`SELECT key FROM kv LIMIT 2000 OFFSET ${off}`);
let rows = stmt.all();
console.log(rows[rows.length - 1]);
} else {
Expand Down
Loading

0 comments on commit 31d076b

Please sign in to comment.