
plan: make explain result more explicit (pingcap#2022)
hanfei1991 authored and ngaut committed Nov 17, 2016
1 parent 0b35f48 commit 2f1cd5b
Showing 2 changed files with 128 additions and 69 deletions.
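In short, the flat per-scan fields "access condition", "count of pushed aggregate functions", and "limit" in the EXPLAIN JSON are folded into a nested "push down info" object, and pushed-down aggregate functions are now listed by name instead of being counted. Drawn from the updated test expectations below, a plain table scan changes from

"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0

to

"push down info": {
    "limit": 0,
    "access conditions": null,
    "filter conditions": null
}

When work is pushed down to the scan, the same object also carries "aggregated push down", "gby items", and "agg funcs" (or "sort items" for a pushed-down sort), per the new physicalTableSource.MarshalJSON in plan/physical_plans.go.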
132 changes: 81 additions & 51 deletions executor/explain_test.go
@@ -39,9 +39,11 @@ func (s *testSuite) TestExplain(c *C) {
"table": "t1",
"desc": false,
"keep order": false,
"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": null,
"filter conditions": null
}
}`,
},
{
@@ -55,9 +57,11 @@ func (s *testSuite) TestExplain(c *C) {
"desc": false,
"out of order": false,
"double read": false,
"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": null,
"filter conditions": null
}
}`,
},
{
@@ -77,9 +81,11 @@ func (s *testSuite) TestExplain(c *C) {
"table": "t2",
"desc": false,
"keep order": false,
"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": null,
"filter conditions": null
}
}
}`,
},
@@ -91,11 +97,13 @@ func (s *testSuite) TestExplain(c *C) {
"table": "t1",
"desc": false,
"keep order": false,
"access condition": [
"gt(test.t1.c1, 0)"
],
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": [
"gt(test.t1.c1, 0)"
],
"filter conditions": null
}
}`,
},
{
@@ -109,11 +117,13 @@ func (s *testSuite) TestExplain(c *C) {
"desc": false,
"out of order": true,
"double read": false,
"access condition": [
"eq(test.t1.c2, 1)"
],
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": [
"eq(test.t1.c2, 1)"
],
"filter conditions": null
}
}`,
},
{
@@ -132,21 +142,25 @@ func (s *testSuite) TestExplain(c *C) {
"table": "t1",
"desc": false,
"keep order": false,
"access condition": [
"gt(test.t1.c1, 1)"
],
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": [
"gt(test.t1.c1, 1)"
],
"filter conditions": null
}
},
"rightPlan": {
"type": "TableScan",
"db": "test",
"table": "t2",
"desc": false,
"keep order": false,
"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": null,
"filter conditions": null
}
}
}`,
},
@@ -161,11 +175,13 @@ func (s *testSuite) TestExplain(c *C) {
"table": "t1",
"desc": false,
"keep order": false,
"access condition": [
"eq(test.t1.c1, 1)"
],
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": [
"eq(test.t1.c1, 1)"
],
"filter conditions": null
}
}
]
}`,
@@ -184,61 +200,75 @@ func (s *testSuite) TestExplain(c *C) {
"desc": false,
"out of order": true,
"double read": false,
"access condition": [
"eq(test.t1.c2, 1)"
],
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": [
"eq(test.t1.c2, 1)"
],
"filter conditions": null
}
}
]
}`,
},
{
"select count(b.b) from t a, t b where a.a = b.a group by a.b",
"select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1",
`{
"type": "CompleteAgg",
"AggFuncs": [
"count(join_agg_0)"
],
"GroupByItems": [
"a.b"
"a.c1"
],
"child": {
"type": "InnerJoin",
"eqCond": [
"eq(a.a, b.a)"
"eq(a.c1, b.c2)"
],
"leftCond": null,
"rightCond": null,
"otherCond": null,
"leftPlan": {
"type": "TableScan",
"db": "test",
"table": "t",
"table": "t1",
"desc": false,
"keep order": false,
"access condition": null,
"count of pushed aggregate functions": 0,
"limit": 0
"push down info": {
"limit": 0,
"access conditions": null,
"filter conditions": null
}
},
"rightPlan": {
"type": "FinalAgg",
"AggFuncs": [
"count([b.b])",
"firstrow([b.a])"
"count([b.c2])",
"firstrow([b.c2])"
],
"GroupByItems": [
"[b.a]"
"[b.c2]"
],
"child": {
"type": "TableScan",
"db": "test",
"table": "t",
"table": "t2",
"desc": false,
"keep order": false,
"access condition": null,
"count of pushed aggregate functions": 2,
"limit": 0
"push down info": {
"limit": 0,
"aggregated push down": true,
"gby items": [
"b.c2"
],
"agg funcs": [
"count(b.c2)",
"firstrow(b.c2)"
],
"access conditions": null,
"filter conditions": null
}
}
}
}
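The last case above shows the payoff of the new format: when an aggregation is pushed down to the scan, the plan reports the group-by items and aggregate functions themselves rather than only a count. As a minimal, self-contained sketch, the nested object can be decoded on its own; the scanInfo and pushDownInfo types below are hypothetical readers written for illustration (not part of this commit), and the input is adapted from the expectation for the scan on t2.

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical reader types; the field tags mirror the keys emitted by
// physicalTableSource.MarshalJSON in this commit.
type scanInfo struct {
	Type         string       `json:"type"`
	Table        string       `json:"table"`
	PushDownInfo pushDownInfo `json:"push down info"`
}

type pushDownInfo struct {
	Limit              int      `json:"limit"`
	AggregatedPushDown bool     `json:"aggregated push down"`
	GbyItems           []string `json:"gby items"`
	AggFuncs           []string `json:"agg funcs"`
	AccessConditions   []string `json:"access conditions"`
	FilterConditions   []string `json:"filter conditions"`
}

func main() {
	// Adapted from the last expectation above: the scan on t2 with the
	// aggregation pushed down to it.
	raw := `{
	    "type": "TableScan", "db": "test", "table": "t2",
	    "desc": false, "keep order": false,
	    "push down info": {
	        "limit": 0,
	        "aggregated push down": true,
	        "gby items": ["b.c2"],
	        "agg funcs": ["count(b.c2)", "firstrow(b.c2)"],
	        "access conditions": null,
	        "filter conditions": null
	    }
	}`
	var s scanInfo
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Table, s.PushDownInfo.AggFuncs)
	// Prints: t2 [count(b.c2) firstrow(b.c2)]
}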
65 changes: 47 additions & 18 deletions plan/physical_plans.go
@@ -111,6 +111,47 @@ type physicalTableSource struct {
conditions []expression.Expression
}

// MarshalJSON implements json.Marshaler interface.
func (p *physicalTableSource) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString("{")
limit := 0
if p.LimitCount != nil {
limit = int(*p.LimitCount)
}
buffer.WriteString(fmt.Sprintf("\"limit\": %d, \n", limit))
if p.Aggregated {
buffer.WriteString(fmt.Sprint("\"aggregated push down\": true, \n"))
gbyItems, err := json.Marshal(p.gbyItems)
if err != nil {
return nil, errors.Trace(err)
}
buffer.WriteString(fmt.Sprintf("\"gby items\": %s, \n", gbyItems))
aggFuncs, err := json.Marshal(p.aggFuncs)
if err != nil {
return nil, errors.Trace(err)
}
buffer.WriteString(fmt.Sprintf("\"agg funcs\": %s, \n", aggFuncs))
} else if len(p.sortItems) > 0 {
sortItems, err := json.Marshal(p.sortItems)
if err != nil {
return nil, errors.Trace(err)
}
buffer.WriteString(fmt.Sprintf("\"sort items\": %s, \n", sortItems))
}
access, err := json.Marshal(p.AccessCondition)
if err != nil {
return nil, errors.Trace(err)
}
filter, err := json.Marshal(p.conditions)
if err != nil {
return nil, errors.Trace(err)
}
// print condition infos
buffer.WriteString(fmt.Sprintf("\"access conditions\": %s, \n", access))
buffer.WriteString(fmt.Sprintf("\"filter conditions\": %s}", filter))
return buffer.Bytes(), nil
}

func (p *physicalTableSource) clearForAggPushDown() {
p.AggFields = nil
p.AggFuncsPB = nil
@@ -341,11 +382,7 @@ func (p *PhysicalIndexScan) Copy() PhysicalPlan {

// MarshalJSON implements json.Marshaler interface.
func (p *PhysicalIndexScan) MarshalJSON() ([]byte, error) {
limit := 0
if p.LimitCount != nil {
limit = int(*p.LimitCount)
}
access, err := json.Marshal(p.AccessCondition)
pushDownInfo, err := json.Marshal(&p.physicalTableSource)
if err != nil {
return nil, errors.Trace(err)
}
@@ -358,10 +395,8 @@ func (p *PhysicalIndexScan) MarshalJSON() ([]byte, error) {
"\n \"desc\": %v,"+
"\n \"out of order\": %v,"+
"\n \"double read\": %v,"+
"\n \"access condition\": %s,"+
"\n \"count of pushed aggregate functions\": %d,"+
"\n \"limit\": %d\n}",
p.DBName.O, p.Table.Name.O, p.Index.Name.O, p.Ranges, p.Desc, p.OutOfOrder, p.DoubleRead, access, len(p.AggFuncsPB), limit))
"\n \"push down info\": %s\n}",
p.DBName.O, p.Table.Name.O, p.Index.Name.O, p.Ranges, p.Desc, p.OutOfOrder, p.DoubleRead, pushDownInfo))
return buffer.Bytes(), nil
}

@@ -373,11 +408,7 @@ func (p *PhysicalTableScan) Copy() PhysicalPlan {

// MarshalJSON implements json.Marshaler interface.
func (p *PhysicalTableScan) MarshalJSON() ([]byte, error) {
limit := 0
if p.LimitCount != nil {
limit = int(*p.LimitCount)
}
access, err := json.Marshal(p.AccessCondition)
pushDownInfo, err := json.Marshal(&p.physicalTableSource)
if err != nil {
return nil, errors.Trace(err)
}
@@ -387,10 +418,8 @@ func (p *PhysicalTableScan) MarshalJSON() ([]byte, error) {
"\n \"table\": \"%s\","+
"\n \"desc\": %v,"+
"\n \"keep order\": %v,"+
"\n \"access condition\": %s,"+
"\n \"count of pushed aggregate functions\": %d,"+
"\n \"limit\": %d}",
p.DBName.O, p.Table.Name.O, p.Desc, p.KeepOrder, access, len(p.AggFuncsPB), limit))
"\n \"push down info\": %s}",
p.DBName.O, p.Table.Name.O, p.Desc, p.KeepOrder, pushDownInfo))
return buffer.Bytes(), nil
}
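Both PhysicalIndexScan.MarshalJSON and PhysicalTableScan.MarshalJSON above now delegate the push-down fields to the embedded physicalTableSource and splice the resulting object in with %s. A minimal, self-contained sketch of that delegation pattern, using hypothetical types (pushDownSource, tableScan) rather than the TiDB ones:

package main

import (
	"encoding/json"
	"fmt"
)

// pushDownSource stands in for the embedded physicalTableSource: it renders
// its own JSON object through a custom marshaler.
type pushDownSource struct {
	Limit            int
	AccessConditions []string
}

// MarshalJSON builds the nested object by hand, mirroring the buffer-based
// approach used in the commit.
func (p *pushDownSource) MarshalJSON() ([]byte, error) {
	access, err := json.Marshal(p.AccessConditions)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("{\"limit\": %d, \"access conditions\": %s}", p.Limit, access)), nil
}

// tableScan stands in for PhysicalTableScan: its marshaler reuses the embedded
// type's marshaler and splices the bytes into its own output.
type tableScan struct {
	pushDownSource
	Table string
}

func (t *tableScan) MarshalJSON() ([]byte, error) {
	pd, err := json.Marshal(&t.pushDownSource)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("{\"table\": %q, \"push down info\": %s}", t.Table, pd)), nil
}

func main() {
	out, err := json.Marshal(&tableScan{Table: "t1"})
	if err != nil {
		panic(err)
	}
	// encoding/json compacts bytes returned by custom marshalers, so the spaces
	// in the format strings above do not survive in the final output:
	// {"table":"t1","push down info":{"limit":0,"access conditions":null}}
	fmt.Println(string(out))
}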
