*: Optimize struct memory usage by adjusting field order (pingcap#11629)
Deardrops authored and winkyao committed Aug 13, 2019
1 parent 3d6c561 commit 6d51ad3
Showing 36 changed files with 175 additions and 168 deletions.
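The change itself is mechanical: within each struct, fields are rearranged so that larger-aligned fields come first and small fields such as bools are grouped at the end. Go pads every field to its type's alignment and rounds the whole struct up to its largest alignment, so a 4-byte or 1-byte field wedged between 8-byte fields leaves a padding hole that disappears once fields are sorted by size. A minimal sketch of the effect (struct names hypothetical, field names borrowed from the cmd/importer/data.go hunk further down); on a 64-bit platform the reordered version drops from 40 to 32 bytes:

package main

import (
	"fmt"
	"unsafe"
)

// fieldsBefore mirrors the old ordering: a 4-byte field sits between
// two 8-byte fields, so the compiler inserts 4 bytes of padding after
// probability, and the trailing bools force another padded word.
type fieldsBefore struct {
	remains     uint64
	repeats     uint64
	probability uint32 // followed by an 8-byte field -> 4 bytes of padding
	step        int64
	init        bool
	useRange    bool // struct size is rounded up to a multiple of 8
}

// fieldsAfter mirrors the new ordering: 8-byte fields first, then the
// 4-byte field, then the bools, so the small fields share one word.
type fieldsAfter struct {
	remains     uint64
	repeats     uint64
	step        int64
	probability uint32
	init        bool
	useRange    bool
}

func main() {
	// On a 64-bit platform this prints 40 and 32.
	fmt.Println(unsafe.Sizeof(fieldsBefore{}), unsafe.Sizeof(fieldsAfter{}))
}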
2 changes: 1 addition & 1 deletion cmd/importer/data.go
@@ -31,8 +31,8 @@ type datum struct {
timeValue time.Time
remains uint64
repeats uint64
probability uint32
step int64
probability uint32

init bool
useRange bool
18 changes: 9 additions & 9 deletions config/config.go
@@ -174,29 +174,29 @@ func (s *Security) ToTLSConfig() (*tls.Config, error) {

// Status is the status section of the config.
type Status struct {
ReportStatus bool `toml:"report-status" json:"report-status"`
StatusHost string `toml:"status-host" json:"status-host"`
StatusPort uint `toml:"status-port" json:"status-port"`
MetricsAddr string `toml:"metrics-addr" json:"metrics-addr"`
StatusPort uint `toml:"status-port" json:"status-port"`
MetricsInterval uint `toml:"metrics-interval" json:"metrics-interval"`
ReportStatus bool `toml:"report-status" json:"report-status"`
RecordQPSbyDB bool `toml:"record-db-qps" json:"record-db-qps"`
}

// Performance is the performance section of the config.
type Performance struct {
MaxProcs uint `toml:"max-procs" json:"max-procs"`
MaxMemory uint64 `toml:"max-memory" json:"max-memory"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
StatsLease string `toml:"stats-lease" json:"stats-lease"`
RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"`
StmtCountLimit uint `toml:"stmt-count-limit" json:"stmt-count-limit"`
FeedbackProbability float64 `toml:"feedback-probability" json:"feedback-probability"`
QueryFeedbackLimit uint `toml:"query-feedback-limit" json:"query-feedback-limit"`
PseudoEstimateRatio float64 `toml:"pseudo-estimate-ratio" json:"pseudo-estimate-ratio"`
ForcePriority string `toml:"force-priority" json:"force-priority"`
BindInfoLease string `toml:"bind-info-lease" json:"bind-info-lease"`
TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"`
}

// PlanCache is the PlanCache section of the config.
@@ -222,9 +222,9 @@ type PreparedPlanCache struct {
// OpenTracing is the opentracing section of the config.
type OpenTracing struct {
Enable bool `toml:"enable" json:"enable"`
RPCMetrics bool `toml:"rpc-metrics" json:"rpc-metrics"`
Sampler OpenTracingSampler `toml:"sampler" json:"sampler"`
Reporter OpenTracingReporter `toml:"reporter" json:"reporter"`
RPCMetrics bool `toml:"rpc-metrics" json:"rpc-metrics"`
}

// OpenTracingSampler is the config for opentracing sampler.
@@ -285,11 +285,11 @@ type TiKVClient struct {

// Binlog is the config for binlog.
type Binlog struct {
Enable bool `toml:"enable" json:"enable"`
WriteTimeout string `toml:"write-timeout" json:"write-timeout"`
Enable bool `toml:"enable" json:"enable"`
// If IgnoreError is true, when writing binlog meets error, TiDB would
// ignore the error.
IgnoreError bool `toml:"ignore-error" json:"ignore-error"`
IgnoreError bool `toml:"ignore-error" json:"ignore-error"`
WriteTimeout string `toml:"write-timeout" json:"write-timeout"`
// Use socket file to write binlog, for compatible with kafka version tidb-binlog.
BinlogSocket string `toml:"binlog-socket" json:"binlog-socket"`
// The strategy for sending binlog to pump, value can be "range" or "hash" now.
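The config structs above mostly just pull their bool fields together at the end. A lone bool between two 8-byte fields still occupies a full 8-byte slot once padding is counted, whereas consecutive bools pack into a single slot. A rough illustration (simplified, hypothetical struct and field names loosely based on Status and Performance), again assuming a 64-bit platform:

package main

import (
	"fmt"
	"unsafe"
)

// scattered spreads three 1-byte bools between 8-byte-aligned fields,
// so each bool is followed by 7 bytes of padding: 48 bytes in total.
type scattered struct {
	reportStatus bool
	statusHost   string
	tcpKeepAlive bool
	maxMemory    uint64
	crossJoin    bool
}

// packed keeps the large fields first and lets the three bools share a
// single trailing word: 32 bytes in total.
type packed struct {
	statusHost   string
	maxMemory    uint64
	reportStatus bool
	tcpKeepAlive bool
	crossJoin    bool
}

func main() {
	fmt.Println(unsafe.Sizeof(scattered{}), unsafe.Sizeof(packed{})) // 48 32
}

A string header counts as a 16-byte, 8-byte-aligned field on 64-bit platforms, which is why it leads the packed version.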
4 changes: 2 additions & 2 deletions ddl/ddl_worker_test.go
@@ -353,9 +353,9 @@ func checkCancelState(txn kv.Transaction, job *model.Job, test *testCancelJob) e
}

type testCancelJob struct {
act model.ActionType // act is the job action.
jobIDs []int64
cancelRetErrs []error // cancelRetErrs is the first return value of CancelJobs.
cancelRetErrs []error // cancelRetErrs is the first return value of CancelJobs.
act model.ActionType // act is the job action.
cancelState model.SchemaState
}

14 changes: 7 additions & 7 deletions ddl/delete_range.go
@@ -55,14 +55,14 @@ type delRangeManager interface {
}

type delRange struct {
store kv.Storage
sessPool *sessionPool
storeSupport bool
emulatorCh chan struct{}
keys []kv.Key
quitCh chan struct{}
store kv.Storage
sessPool *sessionPool
emulatorCh chan struct{}
keys []kv.Key
quitCh chan struct{}

wait sync.WaitGroup // wait is only used when storeSupport is false.
wait sync.WaitGroup // wait is only used when storeSupport is false.
storeSupport bool
}

// newDelRangeManager returns a delRangeManager.
4 changes: 2 additions & 2 deletions domain/domain.go
@@ -61,18 +61,18 @@ type Domain struct {
bindHandle *bindinfo.BindHandle
statsHandle unsafe.Pointer
statsLease time.Duration
statsUpdating sync2.AtomicInt32
ddl ddl.DDL
info *InfoSyncer
m sync.Mutex
SchemaValidator SchemaValidator
sysSessionPool *sessionPool
exit chan struct{}
etcdClient *clientv3.Client
wg sync.WaitGroup
gvc GlobalVariableCache
slowQuery *topNSlowQueries
expensiveQueryHandle *expensivequery.Handle
wg sync.WaitGroup
statsUpdating sync2.AtomicInt32
}

// loadInfoSchema loads infoschema at startTS into handle, usedSchemaVersion is the currently used
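One caveat when reordering fields as in the Domain struct above: sync/atomic's 64-bit operations require 64-bit-aligned operands on 32-bit platforms, and Go only guarantees that alignment for the first word of an allocated struct. A field updated with atomic.AddInt64 and friends should therefore stay at the front of the struct (or be explicitly padded) rather than be pushed behind odd-sized fields; the statsUpdating counter moved here is a 32-bit value, so it is not affected. A minimal, hypothetical sketch:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// counters keeps the atomically updated int64 first so that it stays
// 64-bit aligned even on 32-bit platforms, where only the first word
// of an allocated struct is guaranteed to be 8-byte aligned.
type counters struct {
	hits  int64 // updated with atomic.AddInt64; keep it first
	name  string
	ready bool
}

func main() {
	c := new(counters) // heap allocation guarantees alignment of the first word
	atomic.AddInt64(&c.hits, 1)
	fmt.Println(atomic.LoadInt64(&c.hits)) // 1

	var v counters
	fmt.Println(unsafe.Offsetof(v.hits)) // 0: the counter stays at the front
}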
4 changes: 2 additions & 2 deletions domain/topn_slow_query.go
@@ -217,13 +217,13 @@ type SlowQueryInfo struct {
Start time.Time
Duration time.Duration
Detail execdetails.ExecDetails
Succ bool
ConnID uint64
TxnTS uint64
User string
DB string
TableIDs string
IndexIDs string
Internal bool
Digest string
Internal bool
Succ bool
}
10 changes: 5 additions & 5 deletions executor/adapter.go
@@ -146,18 +146,18 @@ type ExecStmt struct {
InfoSchema infoschema.InfoSchema
// Plan stores a reference to the final physical plan.
Plan plannercore.Plan
// LowerPriority represents whether to lower the execution priority of a query.
LowerPriority bool
// Cacheable represents whether the physical plan can be cached.
Cacheable bool
// Text represents the origin query text.
Text string

StmtNode ast.StmtNode

Ctx sessionctx.Context
// StartTime stands for the starting time when executing the statement.
StartTime time.Time
StartTime time.Time
// LowerPriority represents whether to lower the execution priority of a query.
LowerPriority bool
// Cacheable represents whether the physical plan can be cached.
Cacheable bool
isPreparedStmt bool
isSelectForUpdate bool
retryCount uint
13 changes: 6 additions & 7 deletions executor/aggregate.go
@@ -132,7 +132,6 @@ type AfFinalResult struct {
type HashAggExec struct {
baseExecutor

prepared bool
sc *stmtctx.StatementContext
PartialAggFuncs []aggfuncs.AggFunc
FinalAggFuncs []aggfuncs.AggFunc
@@ -144,10 +143,6 @@ type HashAggExec struct {
groupKeyBuffer []byte
groupValDatums []types.Datum

// After we support parallel execution for aggregation functions with distinct,
// we can remove this attribute.
isUnparallelExec bool

finishCh chan struct{}
finalOutputCh chan *AfFinalResult
finalInputCh chan *chunk.Chunk
@@ -157,10 +152,14 @@ type HashAggExec struct {
partialWorkers []HashAggPartialWorker
finalWorkers []HashAggFinalWorker
defaultVal *chunk.Chunk
childResult *chunk.Chunk

// isChildReturnEmpty indicates whether the child executor only returns an empty input.
isChildReturnEmpty bool

childResult *chunk.Chunk
// After we support parallel execution for aggregation functions with distinct,
// we can remove this attribute.
isUnparallelExec bool
prepared bool
}

// HashAggInput indicates the input of hash agg exec.
4 changes: 2 additions & 2 deletions executor/executor.go
@@ -1240,14 +1240,14 @@ type UnionExec struct {
baseExecutor

stopFetchData atomic.Value
wg sync.WaitGroup

finished chan struct{}
resourcePools []chan *chunk.Chunk
resultPool chan *unionWorkerResult
initialized bool

childrenResults []*chunk.Chunk
wg sync.WaitGroup
initialized bool
}

// unionWorkerResult stores the result for a union worker.
6 changes: 3 additions & 3 deletions executor/grant.go
@@ -46,10 +46,10 @@ type GrantExec struct {
ObjectType ast.ObjectTypeType
Level *ast.GrantLevel
Users []*ast.UserSpec
WithGrant bool

is infoschema.InfoSchema
done bool
is infoschema.InfoSchema
WithGrant bool
done bool
}

// Next implements the Executor Next interface.
11 changes: 5 additions & 6 deletions executor/join.go
@@ -48,7 +48,6 @@ type HashJoinExec struct {
outerKeys []*expression.Column
innerKeys []*expression.Column

prepared bool
// concurrency is the number of partition, build and join workers.
concurrency uint
hashTable *mvmap.MVMap
@@ -58,10 +57,8 @@ type HashJoinExec struct {
joinWorkerWaitGroup sync.WaitGroup
finished atomic.Value
// closeCh add a lock for closing executor.
closeCh chan struct{}
joinType plannercore.JoinType

isOuterJoin bool
closeCh chan struct{}
joinType plannercore.JoinType
requiredRows int64

// We build individual joiner for each join worker when use chunk-based
@@ -77,7 +74,9 @@ type HashJoinExec struct {
joinResultCh chan *hashjoinWorkerResult
hashTableValBufs [][][]byte

memTracker *memory.Tracker // track memory usage.
memTracker *memory.Tracker // track memory usage.
prepared bool
isOuterJoin bool
}

// outerChkResource stores the result of the join outer fetch worker,
4 changes: 2 additions & 2 deletions executor/load_data.go
@@ -351,14 +351,14 @@ type field struct {

type fieldWriter struct {
pos int
ReadBuf []byte
OutputBuf []byte
enclosedChar byte
fieldTermChar byte
term string
isEnclosed bool
isLineStart bool
isFieldStart bool
ReadBuf []byte
OutputBuf []byte
}

func (w *fieldWriter) Init(enclosedChar byte, fieldTermChar byte, readBuf []byte, term string) {
2 changes: 1 addition & 1 deletion executor/prepared.go
@@ -205,10 +205,10 @@ type ExecuteExec struct {
is infoschema.InfoSchema
name string
usingVars []expression.Expression
id uint32
stmtExec Executor
stmt ast.StmtNode
plan plannercore.Plan
id uint32
lowerPriority bool
}

25 changes: 12 additions & 13 deletions executor/show.go
@@ -58,24 +58,23 @@ var etcdDialTimeout = 5 * time.Second
type ShowExec struct {
baseExecutor

Tp ast.ShowStmtType // Databases/Tables/Columns/....
DBName model.CIStr
Table *ast.TableName // Used for showing columns.
Column *ast.ColumnName // Used for `desc table column`.
IndexName model.CIStr // Used for show table regions.
Flag int // Some flag parsed from sql, such as FULL.
Full bool
User *auth.UserIdentity // Used by show grants, show create user.
Roles []*auth.RoleIdentity // Used for show grants.
IfNotExists bool // Used for `show create database if not exists`

// GlobalScope is used by show variables
GlobalScope bool
Tp ast.ShowStmtType // Databases/Tables/Columns/....
DBName model.CIStr
Table *ast.TableName // Used for showing columns.
Column *ast.ColumnName // Used for `desc table column`.
IndexName model.CIStr // Used for show table regions.
Flag int // Some flag parsed from sql, such as FULL.
Roles []*auth.RoleIdentity // Used for show grants.
User *auth.UserIdentity // Used by show grants, show create user.

is infoschema.InfoSchema

result *chunk.Chunk
cursor int

Full bool
IfNotExists bool // Used for `show create database if not exists`
GlobalScope bool // GlobalScope is used by show variables
}

// Next implements the Executor Next interface.
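A second caveat, relevant to widely constructed structs like ShowExec above: reordering is only a drop-in change when every construction site uses keyed composite literals. A positional literal bakes in the old field order and either stops compiling or, when neighbouring fields happen to share a type, silently swaps values. A hypothetical example:

package main

import "fmt"

// options was reordered so that the two strings come first.
// Old order: Verbose bool, Name string, Addr string.
type options struct {
	Name    string
	Addr    string
	Verbose bool
}

func main() {
	// Keyed literal: unaffected by the reordering.
	a := options{Name: "tidb", Addr: "127.0.0.1:4000", Verbose: true}

	// A positional literal written against the old order no longer
	// compiles here (bool vs string), but two same-typed neighbours can
	// swap silently, as Name and Addr do below.
	b := options{"127.0.0.1:4000", "tidb", true}
	fmt.Println(a, b)
}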
19 changes: 9 additions & 10 deletions executor/table_reader.go
@@ -55,10 +53,8 @@ func (sr selectResultHook) SelectResult(ctx context.Context, sctx sessionctx.Con
type TableReaderExecutor struct {
baseExecutor

table table.Table
keepOrder bool
desc bool
ranges []*ranger.Range
table table.Table
ranges []*ranger.Range
// kvRanges are only use for union scan.
kvRanges []kv.KeyRange
dagPB *tipb.DAGRequest
Expand All @@ -68,18 +66,19 @@ type TableReaderExecutor struct {
// resultHandler handles the order of the result. Since (MAXInt64, MAXUint64] stores before [0, MaxInt64] physically
// for unsigned int.
resultHandler *tableResultHandler
streaming bool
feedback *statistics.QueryFeedback
plans []plannercore.PhysicalPlan

memTracker *memory.Tracker
selectResultHook // for testing

keepOrder bool
desc bool
streaming bool
// corColInFilter tells whether there's correlated column in filter.
corColInFilter bool
// corColInAccess tells whether there's correlated column in access conditions.
corColInAccess bool
plans []plannercore.PhysicalPlan

memTracker *memory.Tracker

selectResultHook // for testing
}

// Open initialzes necessary variables for using this executor.
2 changes: 1 addition & 1 deletion expression/aggregation/concat.go
@@ -28,8 +28,8 @@ import (
type concatFunction struct {
aggFunction
separator string
sepInited bool
maxLen uint64
sepInited bool
// truncated according to MySQL, a 'group_concat' function generates exactly one 'truncated' warning during its life time, no matter
// how many group actually truncated. 'truncated' acts as a sentinel to indicate whether this warning has already been
// generated.
Expand Down
