From bb70acda136e40bc8b94dcaf2a6d4088c7277218 Mon Sep 17 00:00:00 2001
From: goroutine
Date: Tue, 15 Nov 2016 16:05:25 +0800
Subject: [PATCH] ddl: rename batchSize to batchCnt (#2005)

---
 ddl/column.go      |  6 +++---
 ddl/index.go       | 14 +++++++-------
 ddl/reorg.go       |  4 ++--
 ddl/schema.go      |  4 ++--
 ddl/schema_test.go | 12 ++++++------
 5 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/ddl/column.go b/ddl/column.go
index 8ef21a05bf820..4f133c25cb2ce 100644
--- a/ddl/column.go
+++ b/ddl/column.go
@@ -326,7 +326,7 @@ func (d *ddl) addTableColumn(t table.Table, columnInfo *model.ColumnInfo, reorgI
 }
 
 // backfillColumnInTxn deals with a part of backfilling column data in a Transaction.
-// This part of the column data rows is defaultSmallBatchSize.
+// This part of the column data rows is defaultSmallBatchCnt.
 func (d *ddl) backfillColumnInTxn(t table.Table, colID int64, handles []int64, colMap map[int64]*types.FieldType,
 	defaultVal types.Datum, txn kv.Transaction) (int64, error) {
 	nextHandle := handles[0]
@@ -391,8 +391,8 @@ func (d *ddl) backfillColumn(t table.Table, columnInfo *model.ColumnInfo, handle
 
 	var endIdx int
 	for len(handles) > 0 {
-		if len(handles) >= defaultSmallBatchSize {
-			endIdx = defaultSmallBatchSize
+		if len(handles) >= defaultSmallBatchCnt {
+			endIdx = defaultSmallBatchCnt
 		} else {
 			endIdx = len(handles)
 		}
diff --git a/ddl/index.go b/ddl/index.go
index e0e31426d7934..8c7c3c0b03237 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -365,8 +365,8 @@ func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo
 	return rowKey, vals, nil
 }
 
-const defaultBatchSize = 1024
-const defaultSmallBatchSize = 128
+const defaultBatchCnt = 1024
+const defaultSmallBatchCnt = 128
 
 // How to add index in reorganization state?
 // 1. Generate a snapshot with special version.
@@ -417,7 +417,7 @@ func (d *ddl) getSnapshotRows(t table.Table, version uint64, seekHandle int64) (
 	}
 	defer it.Close()
 
-	handles := make([]int64, 0, defaultBatchSize)
+	handles := make([]int64, 0, defaultBatchCnt)
 	for it.Valid() {
 		if !it.Key().HasPrefix(t.RecordPrefix()) {
 			break
@@ -430,7 +430,7 @@ func (d *ddl) getSnapshotRows(t table.Table, version uint64, seekHandle int64) (
 		}
 
 		handles = append(handles, handle)
-		if len(handles) == defaultBatchSize {
+		if len(handles) == defaultBatchCnt {
 			break
 		}
 
@@ -447,7 +447,7 @@ func (d *ddl) getSnapshotRows(t table.Table, version uint64, seekHandle int64) (
 }
 
 // backfillIndexInTxn deals with a part of backfilling index data in a Transaction.
-// This part of the index data rows is defaultSmallBatchSize.
+// This part of the index data rows is defaultSmallBatchCnt.
 func (d *ddl) backfillIndexInTxn(t table.Table, kvIdx table.Index, handles []int64, txn kv.Transaction) (int64, error) {
 	nextHandle := handles[0]
 	for _, handle := range handles {
@@ -489,8 +489,8 @@ func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, hand
 	var endIdx int
 	kvIdx := tables.NewIndex(t.Meta(), indexInfo)
 	for len(handles) > 0 {
-		if len(handles) >= defaultSmallBatchSize {
-			endIdx = defaultSmallBatchSize
+		if len(handles) >= defaultSmallBatchCnt {
+			endIdx = defaultSmallBatchCnt
 		} else {
 			endIdx = len(handles)
 		}
diff --git a/ddl/reorg.go b/ddl/reorg.go
index 8055f7584632b..aa6a655f8cb89 100644
--- a/ddl/reorg.go
+++ b/ddl/reorg.go
@@ -176,12 +176,12 @@ func (d *ddl) delKeysWithStartKey(prefix, startKey kv.Key, jobType JobType, job
 
 	var count int
 	total := job.GetRowCount()
-	keys := make([]kv.Key, 0, defaultBatchSize)
+	keys := make([]kv.Key, 0, defaultBatchCnt)
 	for {
 		if limitedDel && count >= limit {
 			break
 		}
-		batch := defaultBatchSize
+		batch := defaultBatchCnt
 		if limitedDel && count+batch > limit {
 			batch = limit - count
 		}
diff --git a/ddl/schema.go b/ddl/schema.go
index 1c606ab2d9bc8..2f1fa8dd0fdcc 100644
--- a/ddl/schema.go
+++ b/ddl/schema.go
@@ -171,12 +171,12 @@ func (d *ddl) dropSchemaData(tIDs []int64, startKey kv.Key, job *model.Job, m *m
 		if startKey == nil {
 			startKey = tablecodec.EncodeTablePrefix(id)
 		}
-		delCount, err := d.dropTableData(startKey, job, defaultBatchSize)
+		delCount, err := d.dropTableData(startKey, job, defaultBatchCnt)
 		if err != nil {
 			return false, errors.Trace(err)
 		}
 
-		if delCount == defaultBatchSize {
+		if delCount == defaultBatchCnt {
 			isFinished = false
 			nextStartKey = job.Args[len(job.Args)-1].(kv.Key)
 			break
diff --git a/ddl/schema_test.go b/ddl/schema_test.go
index 4e2f70b0eaeb2..ad96b73a4cb8b 100644
--- a/ddl/schema_test.go
+++ b/ddl/schema_test.go
@@ -137,13 +137,13 @@ func (s *testSchemaSuite) TestSchema(c *C) {
 		_, err := tbl1.AddRecord(ctx, types.MakeDatums(i, i, i))
 		c.Assert(err, IsNil)
 	}
-	// create table t1 with defaultBatchSize+10 records.
+	// create table t1 with defaultBatchCnt+10 records.
 	tblInfo2 := testTableInfo(c, d, "t1", 3)
 	tJob2 := testCreateTable(c, ctx, d, dbInfo, tblInfo2)
 	testCheckTableState(c, d, dbInfo, tblInfo2, model.StatePublic)
 	testCheckJobDone(c, d, tJob2, true)
 	tbl2 := testGetTable(c, d, dbInfo.ID, tblInfo2.ID)
-	for i := 1; i <= defaultBatchSize+10; i++ {
+	for i := 1; i <= defaultBatchCnt+10; i++ {
 		_, err := tbl2.AddRecord(ctx, types.MakeDatums(i, i, i))
 		c.Assert(err, IsNil)
 	}
@@ -157,12 +157,12 @@ func (s *testSchemaSuite) TestSchema(c *C) {
 		job.Mu.Lock()
 		count := job.RowCount
 		job.Mu.Unlock()
-		if updatedCount == 0 && count != defaultBatchSize+100 {
-			checkErr = errors.Errorf("row count %v isn't equal to %v", count, defaultBatchSize+100)
+		if updatedCount == 0 && count != defaultBatchCnt+100 {
+			checkErr = errors.Errorf("row count %v isn't equal to %v", count, defaultBatchCnt+100)
 			return
 		}
-		if updatedCount == 1 && count != defaultBatchSize+110 {
-			checkErr = errors.Errorf("row count %v isn't equal to %v", count, defaultBatchSize+110)
+		if updatedCount == 1 && count != defaultBatchCnt+110 {
+			checkErr = errors.Errorf("row count %v isn't equal to %v", count, defaultBatchCnt+110)
 		}
 		updatedCount++
 	}