A simple database, small but complete, hence the name Sparrow DB.
Currently it supports the basic operations of creating tables, dropping tables, inserting data, deleting data, updating data, and querying data.
CREATE TABLE [ IF NOT EXISTS ]
table_name (
{ column_name data_type [ COMMENT comment ] }
[, ...]
)
[ COMMENT table_comment ]
DROP TABLE [ IF EXISTS ] table_name
INSERT INTO table_name ( field1, field2, ... fieldN )
VALUES ( value1, value2, ... valueN )
DELETE FROM table_name [ WHERE condition ]
UPDATE table_name SET field1 = new_value1, field2 = new_value2 [ WHERE condition ]
SELECT column_name, column_name
FROM table_name
[ WHERE condition ]
[ LIMIT N ] [ OFFSET M ]
The SQL grammar is defined with ANTLR4; part of the grammar is shown below:
grammar SparrowSQL;

tokens {
    DELIMITER
}

singleStatement
    : statement EOF
    ;

statement
    : query                                                     #statementDefault
    | CREATE TABLE (IF NOT EXISTS)? qualifiedName
        '(' tableElement (',' tableElement)* ')'
        (COMMENT string)?                                       #createTable
    | DROP TABLE (IF EXISTS)? qualifiedName                     #dropTable
    | INSERT INTO qualifiedName columnAliases? query            #insertInto
    | DELETE FROM qualifiedName (WHERE booleanExpression)?      #delete
    ;
The SQL parser converts SQL text into an AST, which can be implemented with the ANTLR4 visitor pattern. The core parsing logic is as follows:
import java.util.function.Function;

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.InputMismatchException;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.atn.PredictionMode;
import org.antlr.v4.runtime.misc.ParseCancellationException;

public class SqlParser {

    public Statement createStatement(String sql) {
        return (Statement) invokeParser("statement", sql, SparrowSQLParser::singleStatement);
    }

    private Node invokeParser(String name, String sql, Function<SparrowSQLParser, ParserRuleContext> parseFunction) {
        try {
            SparrowSQLLexer lexer = new SparrowSQLLexer(new CaseInsensitiveStream(CharStreams.fromString(sql)));
            CommonTokenStream tokenStream = new CommonTokenStream(lexer);
            SparrowSQLParser parser = new SparrowSQLParser(tokenStream);

            // Override the default error strategy to not attempt inserting or deleting a token.
            // Otherwise, it messes up error reporting.
            parser.setErrorHandler(new DefaultErrorStrategy() {
                @Override
                public Token recoverInline(Parser recognizer) throws RecognitionException {
                    if (nextTokensContext == null) {
                        throw new InputMismatchException(recognizer);
                    } else {
                        throw new InputMismatchException(recognizer, nextTokensState, nextTokensContext);
                    }
                }
            });
            parser.removeErrorListeners();

            ParserRuleContext tree;
            try {
                // First, try parsing with the potentially faster SLL mode.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                tree = parseFunction.apply(parser);
            } catch (ParseCancellationException ex) {
                // If that fails, fall back to the slower but more general LL mode.
                tokenStream.reset(); // rewind input stream
                parser.reset();
                parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                tree = parseFunction.apply(parser);
            }

            // Walk the parse tree with the visitor and build the AST.
            return new AstBuilder(new ParsingOptions()).visit(tree);
        } catch (StackOverflowError e) {
            throw new ParsingException(name + " is too large (stack overflow while parsing)");
        }
    }
}
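The AstBuilder used above is where the visitor pattern comes in: each labeled alternative in the grammar (#createTable, #dropTable, ...) gets a generated visit method that is overridden to produce an AST node. A minimal sketch, assuming ANTLR generates SparrowSQLBaseVisitor<Node> from the grammar and that the CreateTable, DropTable, and TableElement node constructors take roughly these arguments (their real signatures are not shown in this section):

import java.util.List;
import java.util.stream.Collectors;

public class AstBuilder extends SparrowSQLBaseVisitor<Node> {

    private final ParsingOptions parsingOptions;

    public AstBuilder(ParsingOptions parsingOptions) {
        this.parsingOptions = parsingOptions;
    }

    @Override
    public Node visitSingleStatement(SparrowSQLParser.SingleStatementContext context) {
        // Unwrap the "statement EOF" rule and visit the contained statement.
        return visit(context.statement());
    }

    @Override
    public Node visitCreateTable(SparrowSQLParser.CreateTableContext context) {
        // Each labeled alternative (#createTable) gets its own context class and visit method.
        // The CreateTable constructor shape here is an assumption for illustration.
        List<TableElement> elements = context.tableElement().stream()
                .map(e -> (TableElement) visit(e))
                .collect(Collectors.toList());
        boolean notExists = context.EXISTS() != null; // from the optional IF NOT EXISTS
        return new CreateTable(context.qualifiedName().getText(), elements, notExists);
    }

    @Override
    public Node visitDropTable(SparrowSQLParser.DropTableContext context) {
        return new DropTable(context.qualifiedName().getText(), context.EXISTS() != null);
    }
}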
The SQL query engine uses the classic Volcano model. This execution model abstracts each relational-algebra operation into an Operator and builds the whole SQL statement into an Operator tree; next() is called top-down through the tree, while data is pulled up and processed bottom-up.
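To make the pull model concrete, here is a minimal sketch of what such an Operator interface and a filter operator could look like; the Operator interface and the Row type are assumptions for illustration, not the project's actual API:

import java.util.function.Predicate;

// Pull-based (Volcano) operator: each call to next() returns one row,
// or null once the operator is exhausted.
interface Operator {
    Row next();
}

// Filter pulls rows from its child and only passes through those
// that satisfy the predicate (i.e. the WHERE condition).
class FilterOperator implements Operator {
    private final Operator child;
    private final Predicate<Row> predicate;

    FilterOperator(Operator child, Predicate<Row> predicate) {
        this.child = child;
        this.predicate = predicate;
    }

    @Override
    public Row next() {
        Row row;
        while ((row = child.next()) != null) {
            if (predicate.test(row)) {
                return row;
            }
        }
        return null; // child exhausted
    }
}

The root operator (e.g. the projection) keeps calling next() until it gets null, so each operator only needs to materialize one row at a time.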
For example, translating a query statement into an execution plan under this model can be implemented as follows:
public class LogicalPlanner {

    public PlanNode planStatement(Statement statement, TransactionID transactionId) {
        if (statement instanceof Insert) {
            return createInsertPlan((Insert) statement, transactionId);
        } else if (statement instanceof Delete) {
            return createDeletePlan((Delete) statement, transactionId);
        } else if (statement instanceof Query) {
            return createQueryPlan((Query) statement, transactionId);
        } else if (statement instanceof CreateTable) {
            return createTablePlan((CreateTable) statement, transactionId);
        }
        throw new RuntimeException("unsupported statement type: " + statement.getClass().getSimpleName());
    }

    private PlanNode createQueryPlan(Query statement, TransactionID transactionId) {
        QuerySpecification querySpecification = (QuerySpecification) statement.getQueryBody();
        // Stack the operators bottom-up, following SQL's logical evaluation order:
        // scan -> filter -> group by -> order by -> limit -> project.
        PlanNode root = getTableScanNode(querySpecification.getFrom(), transactionId);
        root = getWhereNode(root, querySpecification.getWhere());
        root = getGroupByNode(root, querySpecification.getGroupBy());
        root = getOrderByNode(root, querySpecification.getOrderBy());
        root = getLimitNode(root, querySpecification.getLimit());
        return getProjectNode(root, querySpecification.getSelect());
    }
}
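Putting the two pieces together, a query goes from SQL text to an AST to a plan tree roughly as sketched below; the TransactionID construction is a placeholder, and the executor that turns the PlanNode tree into Volcano Operators is assumed rather than shown in this section:

public class Example {
    public static void main(String[] args) {
        SqlParser parser = new SqlParser();
        LogicalPlanner planner = new LogicalPlanner();

        // 1. Parse: SQL text -> AST (Statement).
        Statement statement = parser.createStatement(
                "SELECT name, age FROM user WHERE age > 18 LIMIT 10");

        // 2. Plan: AST -> PlanNode tree (Project -> Limit -> ... -> TableScan).
        //    How a TransactionID is actually obtained is not shown here.
        PlanNode plan = planner.planStatement(statement, new TransactionID());

        // 3. Execute: an executor (not shown) would map each PlanNode to a
        //    Volcano Operator and pull rows from the root with next().
    }
}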