diff --git a/bench/load/concurrentexec/batchWorkflow.go b/bench/load/concurrentexec/batchWorkflow.go index 5134a200229..f2b8e168c27 100644 --- a/bench/load/concurrentexec/batchWorkflow.go +++ b/bench/load/concurrentexec/batchWorkflow.go @@ -143,7 +143,7 @@ func concurrentActivity( ) (time.Duration, error) { var latency time.Duration if activity.GetInfo(ctx).Attempt == 0 { - latency = time.Now().Sub(time.Unix(0, scheduledTimeNanos)) + latency = time.Since(time.Unix(0, scheduledTimeNanos)) } time.Sleep(time.Duration(rand.Intn(maxSleepTimeInSeconds)) * time.Second) diff --git a/canary/common.go b/canary/common.go index 153dabb1c41..870bb9e8f74 100644 --- a/canary/common.go +++ b/canary/common.go @@ -53,7 +53,7 @@ func int32Ptr(v int32) *int32 { // getContextValue retrieves and returns the value corresponding // to the given key - panics if the key does not exist -func getContextValue(ctx context.Context, key string) interface{} { +func getContextValue(ctx context.Context, key contextKey) interface{} { value := ctx.Value(key) if value == nil { panic("ctx.Value(" + key + ") returned nil") diff --git a/canary/const.go b/canary/const.go index fc70d5b186d..3204a9b3fae 100644 --- a/canary/const.go +++ b/canary/const.go @@ -35,11 +35,11 @@ const ( activityTaskTimeout = 3 * time.Minute childWorkflowTimeout = 6 * time.Minute taskListName = "canary-task-queue" - ctxKeyActivityRuntime = "runtime" - ctxKeyActivityArchivalRuntime = "runtime-archival" - ctxKeyActivitySystemClient = "system-client" - ctxKeyActivityBatcherClient = "batcher-client" - ctxKeyConfig = "runtime-config" + ctxKeyActivityRuntime = contextKey("runtime") + ctxKeyActivityArchivalRuntime = contextKey("runtime-archival") + ctxKeyActivitySystemClient = contextKey("system-client") + ctxKeyActivityBatcherClient = contextKey("batcher-client") + ctxKeyConfig = contextKey("runtime-config") archivalDomain = "canary-archival-domain" archivalTaskListName = "canary-archival-task-queue" ) diff --git a/canary/historyArchival.go b/canary/historyArchival.go index 083eb7cd7c8..0fce26278c4 100644 --- a/canary/historyArchival.go +++ b/canary/historyArchival.go @@ -172,5 +172,5 @@ func archivalExternalWorkflow(ctx workflow.Context, scheduledTimeNanos int64) er } func largeResultActivity() ([]byte, error) { - return make([]byte, resultSize, resultSize), nil + return make([]byte, resultSize), nil } diff --git a/canary/retry.go b/canary/retry.go index dbf364c1e55..73ec42d7246 100644 --- a/canary/retry.go +++ b/canary/retry.go @@ -32,9 +32,9 @@ import ( ) var ( - errRetryableActivityError = errors.New("Retry me") - errUnexpectedProgress = errors.New("Unexpected progress") - errUnexpectedResult = errors.New("Unexpected result") + errRetryableActivityError = errors.New("retry me") + errUnexpectedProgress = errors.New("unexpected progress") + errUnexpectedResult = errors.New("unexpected result") ) func init() { diff --git a/canary/timeout.go b/canary/timeout.go index 22114f9b471..179545e5155 100644 --- a/canary/timeout.go +++ b/canary/timeout.go @@ -82,9 +82,7 @@ func timeoutActivity(ctx context.Context, scheduledTimeNanos int64) error { defer recordActivityEnd(scope, sw, err) timer := time.NewTimer(activityDelay) - select { - case <-timer.C: - } + <-timer.C timer.Stop() return nil diff --git a/cmd/server/cadence/cadence.go b/cmd/server/cadence/cadence.go index dfb8111d4c5..0bdda06f21f 100644 --- a/cmd/server/cadence/cadence.go +++ b/cmd/server/cadence/cadence.go @@ -87,16 +87,12 @@ func startHandler(c *cli.Context) { server.Start() } - select { - case <-sigc: - 
{ - log.Println("Received SIGTERM signal, initiating shutdown.") - for _, daemon := range daemons { - daemon.Stop() - } - os.Exit(0) - } + <-sigc + log.Println("Received SIGTERM signal, initiating shutdown.") + for _, daemon := range daemons { + daemon.Stop() } + os.Exit(0) } func getEnvironment(c *cli.Context) string { diff --git a/common/archiver/filestore/queryParser.go b/common/archiver/filestore/queryParser.go index 9dedabb7e9d..ec98055237c 100644 --- a/common/archiver/filestore/queryParser.go +++ b/common/archiver/filestore/queryParser.go @@ -95,13 +95,13 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: return errors.New("only comparison and \"and\" expression is supported") } diff --git a/common/archiver/gcloud/connector/client.go b/common/archiver/gcloud/connector/client.go index deaa87b7dae..3e25ed0ec16 100644 --- a/common/archiver/gcloud/connector/client.go +++ b/common/archiver/gcloud/connector/client.go @@ -27,7 +27,6 @@ import ( "io" "io/ioutil" "os" - "regexp" "cloud.google.com/go/storage" "google.golang.org/api/iterator" @@ -36,15 +35,10 @@ import ( "github.com/uber/cadence/common/config" ) -const ( - bucketNameRegExpRaw = "^gs:\\/\\/[^:\\/\n?]+" -) - var ( // ErrBucketNotFound is non retriable error that is thrown when the bucket doesn't exist ErrBucketNotFound = errors.New("bucket not found") errObjectNotFound = errors.New("object not found") - bucketNameRegExp = regexp.MustCompile(bucketNameRegExpRaw) ) type ( @@ -109,7 +103,6 @@ func (s *storageWrapper) Upload(ctx context.Context, URI archiver.URI, fileName // Exist check if a bucket or an object exist // If fileName is empty, then 'Exist' function will only check if the given bucket exist. 
func (s *storageWrapper) Exist(ctx context.Context, URI archiver.URI, fileName string) (exists bool, err error) { - err = ErrBucketNotFound bucket := s.client.Bucket(URI.Hostname()) if _, err := bucket.Attrs(ctx); err != nil { return false, err diff --git a/common/archiver/gcloud/connector/clientDelegate.go b/common/archiver/gcloud/connector/clientDelegate.go index c5b0cb8e956..334797285d2 100644 --- a/common/archiver/gcloud/connector/clientDelegate.go +++ b/common/archiver/gcloud/connector/clientDelegate.go @@ -23,7 +23,6 @@ package connector import ( "context" "io/ioutil" - "os" "cloud.google.com/go/storage" "golang.org/x/oauth2/google" @@ -97,20 +96,8 @@ type ( ObjectIteratorWrapper interface { Next() (*storage.ObjectAttrs, error) } - - objectIteratorDelegate struct { - iterator *storage.ObjectIterator - } ) -func newClientDelegate() (*clientDelegate, error) { - ctx := context.Background() - if credentialsPath := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); credentialsPath != "" { - return newClientDelegateWithCredentials(ctx, credentialsPath) - } - return newDefaultClientDelegate(ctx) -} - func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) { nativeClient, err := storage.NewClient(ctx) return &clientDelegate{nativeClient: nativeClient}, err @@ -164,17 +151,6 @@ func (b *bucketDelegate) Attrs(ctx context.Context) (*storage.BucketAttrs, error return b.bucket.Attrs(ctx) } -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will -// have a non-empty Prefix field, and a zero value for all other fields. These -// represent prefixes. -func (o *objectIteratorDelegate) Next() (*storage.ObjectAttrs, error) { - return o.iterator.Next() -} - // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. 
// diff --git a/common/archiver/gcloud/connector/client_test.go b/common/archiver/gcloud/connector/client_test.go index ead5a52834c..5b945432aa7 100644 --- a/common/archiver/gcloud/connector/client_test.go +++ b/common/archiver/gcloud/connector/client_test.go @@ -84,6 +84,7 @@ func (s *clientSuite) TestUpload() { mockWriter.On("Close").Return(nil).Times(1) URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + s.Require().NoError(err) err = storageWrapper.Upload(ctx, URI, "myfile.history", []byte("{}")) s.Require().NoError(err) } @@ -105,6 +106,7 @@ func (s *clientSuite) TestUploadWriterCloseError() { mockWriter.On("Close").Return(errors.New("Not Found")).Times(1) URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + s.Require().NoError(err) err = storageWrapper.Upload(ctx, URI, "myfile.history", []byte("{}")) s.Require().EqualError(err, "Not Found") } @@ -211,6 +213,7 @@ func (s *clientSuite) TestGet() { mockReader.On("Close").Return(nil).Times(1) URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + s.Require().NoError(err) _, err = storageWrapper.Get(ctx, URI, "myfile.history") s.Require().NoError(err) } @@ -253,6 +256,7 @@ func (s *clientSuite) TestQuery() { var fileNames []string URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + s.Require().NoError(err) fileNames, err = storageWrapper.Query(ctx, URI, "7478875943689868082123907395549832634615673687049942026838") s.Require().NoError(err) s.Equal(strings.Join(fileNames, ", "), "fileName_01") @@ -299,6 +303,7 @@ func (s *clientSuite) TestQueryWithFilter() { var fileNames []string URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + s.Require().NoError(err) fileNames, _, _, err = storageWrapper.QueryWithFilters(ctx, URI, "closeTimeout_2020-02-27T09:42:28Z", 0, 0, []connector.Precondition{newWorkflowIDPrecondition("4418294404690464320")}) s.Require().NoError(err) diff --git a/common/archiver/gcloud/historyArchiver_test.go b/common/archiver/gcloud/historyArchiver_test.go index f66927d39c8..7bf0a8d79b9 100644 --- a/common/archiver/gcloud/historyArchiver_test.go +++ b/common/archiver/gcloud/historyArchiver_test.go @@ -552,6 +552,7 @@ func (h *historyArchiverSuite) TestGet_Success_FromToken() { ctx := context.Background() mockCtrl := gomock.NewController(h.T()) URI, err := archiver.NewURI("gs://my-bucket-cad/cadence_archival/development") + h.Require().NoError(err) storageWrapper := &mocks.Client{} storageWrapper.On("Exist", ctx, URI, "").Return(true, nil).Times(1) storageWrapper.On("Query", ctx, URI, "71817125141568232911739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-24_1.history", "905702227796330300141628222723188294514017512010591354159_-24_2.history", "905702227796330300141628222723188294514017512010591354159_-24_3.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil).Times(1) diff --git a/common/archiver/gcloud/queryParser.go b/common/archiver/gcloud/queryParser.go index fa53848fc2d..72ce48a12cc 100644 --- a/common/archiver/gcloud/queryParser.go +++ b/common/archiver/gcloud/queryParser.go @@ -26,13 +26,11 @@ import ( "errors" "fmt" "strconv" - "strings" "time" "github.com/xwb1989/sqlparser" "github.com/uber/cadence/common" - "github.com/uber/cadence/common/types" ) type ( @@ -96,7 +94,7 @@ func (p *queryParser) Parse(query 
string) (*parsedQuery, error) { } if (parsedQuery.closeTime == 0 && parsedQuery.startTime == 0) || (parsedQuery.closeTime != 0 && parsedQuery.startTime != 0) { - return nil, errors.New("Requires a StartTime or CloseTime") + return nil, errors.New("requires a StartTime or CloseTime") } if parsedQuery.searchPrecision == nil { @@ -111,13 +109,13 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: return errors.New("only comparison and \"and\" expression is supported") } @@ -233,21 +231,6 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, return nil } -func (p *queryParser) convertCloseTime(timestamp int64, op string, parsedQuery *parsedQuery) error { - switch op { - case "=": - if err := p.convertCloseTime(timestamp, ">=", parsedQuery); err != nil { - return err - } - if err := p.convertCloseTime(timestamp, "<=", parsedQuery); err != nil { - return err - } - default: - return fmt.Errorf("operator %s is not supported for close time", op) - } - return nil -} - func convertToTimestamp(timeStr string) (int64, error) { timestamp, err := strconv.ParseInt(timeStr, 10, 64) if err == nil { @@ -264,24 +247,6 @@ func convertToTimestamp(timeStr string) (int64, error) { return parsedTime.UnixNano(), nil } -func convertStatusStr(statusStr string) (types.WorkflowExecutionCloseStatus, error) { - statusStr = strings.ToLower(statusStr) - switch statusStr { - case "completed": - return types.WorkflowExecutionCloseStatusCompleted, nil - case "failed": - return types.WorkflowExecutionCloseStatusFailed, nil - case "canceled": - return types.WorkflowExecutionCloseStatusCanceled, nil - case "continuedasnew": - return types.WorkflowExecutionCloseStatusContinuedAsNew, nil - case "timedout": - return types.WorkflowExecutionCloseStatusTimedOut, nil - default: - return 0, fmt.Errorf("unknown workflow close status: %s", statusStr) - } -} - func extractStringValue(s string) (string, error) { if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { return s[1 : len(s)-1], nil diff --git a/common/archiver/gcloud/util.go b/common/archiver/gcloud/util.go index 1fcf04665aa..95cd7e18683 100644 --- a/common/archiver/gcloud/util.go +++ b/common/archiver/gcloud/util.go @@ -50,11 +50,6 @@ func decodeHistoryBatches(data []byte) ([]*types.History, error) { return historyBatches, nil } -func constructHistoryFilename(domainID, workflowID, runID string, version int64) string { - combinedHash := constructHistoryFilenamePrefix(domainID, workflowID, runID) - return fmt.Sprintf("%s_%v.history", combinedHash, version) -} - func constructHistoryFilenameMultipart(domainID, workflowID, runID string, version int64, partNumber int) string { combinedHash := constructHistoryFilenamePrefix(domainID, workflowID, runID) return fmt.Sprintf("%s_%v_%v.history", combinedHash, version, partNumber) diff --git a/common/archiver/s3store/historyArchiver.go b/common/archiver/s3store/historyArchiver.go index 5169c740019..3db48ded2b8 100644 --- 
a/common/archiver/s3store/historyArchiver.go +++ b/common/archiver/s3store/historyArchiver.go @@ -68,7 +68,6 @@ type ( s3cli s3iface.S3API // only set in test code historyIterator archiver.HistoryIterator - config *config.S3Archiver } getHistoryToken struct { diff --git a/common/archiver/s3store/historyArchiver_test.go b/common/archiver/s3store/historyArchiver_test.go index eed33298f6c..1d46b9216e2 100644 --- a/common/archiver/s3store/historyArchiver_test.go +++ b/common/archiver/s3store/historyArchiver_test.go @@ -46,7 +46,6 @@ import ( "github.com/uber/cadence/common" "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/archiver/s3store/mocks" - "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/loggerimpl" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/types" @@ -73,7 +72,6 @@ type historyArchiverSuite struct { suite.Suite s3cli *mocks.S3API container *archiver.HistoryBootstrapContainer - logger log.Logger testArchivalURI archiver.URI historyBatchesV1 []*archiver.HistoryBlob historyBatchesV100 []*archiver.HistoryBlob diff --git a/common/archiver/s3store/queryParser.go b/common/archiver/s3store/queryParser.go index 2878facbea2..c8c7af7cacd 100644 --- a/common/archiver/s3store/queryParser.go +++ b/common/archiver/s3store/queryParser.go @@ -110,13 +110,13 @@ func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQ return errors.New("where expression is nil") } - switch expr.(type) { + switch expr := expr.(type) { case *sqlparser.ComparisonExpr: - return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + return p.convertComparisonExpr(expr, parsedQuery) case *sqlparser.AndExpr: - return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + return p.convertAndExpr(expr, parsedQuery) case *sqlparser.ParenExpr: - return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + return p.convertParenExpr(expr, parsedQuery) default: return errors.New("only comparison and \"and\" expression is supported") } diff --git a/common/archiver/s3store/visibilityArchiver_test.go b/common/archiver/s3store/visibilityArchiver_test.go index cd86a95ecaf..53157bbab3c 100644 --- a/common/archiver/s3store/visibilityArchiver_test.go +++ b/common/archiver/s3store/visibilityArchiver_test.go @@ -40,7 +40,6 @@ import ( "github.com/uber/cadence/common" "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/archiver/s3store/mocks" - "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/loggerimpl" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/types" @@ -52,7 +51,6 @@ type visibilityArchiverSuite struct { s3cli *mocks.S3API container *archiver.VisibilityBootstrapContainer - logger log.Logger visibilityRecords []*visibilityRecord controller *gomock.Controller diff --git a/common/archiver/util.go b/common/archiver/util.go index 012dfdcee3d..70f397b99f1 100644 --- a/common/archiver/util.go +++ b/common/archiver/util.go @@ -30,7 +30,7 @@ import ( var ( errEmptyDomainID = errors.New("DomainID is empty") - errEmptyDomainName = errors.New("Domain name is empty") + errEmptyDomainName = errors.New("DomainName is empty") errEmptyWorkflowID = errors.New("WorkflowID is empty") errEmptyRunID = errors.New("RunID is empty") errInvalidPageSize = errors.New("PageSize should be greater than 0") diff --git a/common/blobstore/filestore/client_test.go b/common/blobstore/filestore/client_test.go index 7e186d7d953..9a0a5bd37b0 100644 --- 
a/common/blobstore/filestore/client_test.go +++ b/common/blobstore/filestore/client_test.go @@ -23,6 +23,7 @@ package filestore import ( + "context" "io/ioutil" "os" "testing" @@ -84,6 +85,7 @@ func (s *ClientSuite) TestCrudOperations() { defer os.RemoveAll(name) c, err := NewFilestoreClient(&config.FileBlobstore{OutputDirectory: name}) s.NoError(err) + ctx := context.Background() // put three blobs in blobstore key1 := uuid.New() @@ -101,54 +103,54 @@ func (s *ClientSuite) TestCrudOperations() { Tags: map[string]string{"key1": "value1", "key2": "value2"}, Body: []byte{1, 2, 3, 4, 5}, } - _, err = c.Put(nil, &blobstore.PutRequest{ + _, err = c.Put(ctx, &blobstore.PutRequest{ Key: key1, Blob: blob1, }) s.NoError(err) - _, err = c.Put(nil, &blobstore.PutRequest{ + _, err = c.Put(ctx, &blobstore.PutRequest{ Key: key2, Blob: blob2, }) s.NoError(err) - _, err = c.Put(nil, &blobstore.PutRequest{ + _, err = c.Put(ctx, &blobstore.PutRequest{ Key: key3, Blob: blob3, }) s.NoError(err) // get the blobs back - get1, err := c.Get(nil, &blobstore.GetRequest{Key: key1}) + get1, err := c.Get(ctx, &blobstore.GetRequest{Key: key1}) s.NoError(err) s.Nil(get1.Blob.Tags) s.Equal([]byte{1, 2, 3}, get1.Blob.Body) - get2, err := c.Get(nil, &blobstore.GetRequest{Key: key2}) + get2, err := c.Get(ctx, &blobstore.GetRequest{Key: key2}) s.NoError(err) s.Equal(map[string]string{"key1": "value1"}, get2.Blob.Tags) s.Empty(get2.Blob.Body) - get3, err := c.Get(nil, &blobstore.GetRequest{Key: key3}) + get3, err := c.Get(ctx, &blobstore.GetRequest{Key: key3}) s.NoError(err) s.Equal(map[string]string{"key1": "value1", "key2": "value2"}, get3.Blob.Tags) s.Equal([]byte{1, 2, 3, 4, 5}, get3.Blob.Body) // confirm all the blobs exist - exists1, err := c.Exists(nil, &blobstore.ExistsRequest{Key: key1}) + exists1, err := c.Exists(ctx, &blobstore.ExistsRequest{Key: key1}) s.NoError(err) s.True(exists1.Exists) - exists2, err := c.Exists(nil, &blobstore.ExistsRequest{Key: key2}) + exists2, err := c.Exists(ctx, &blobstore.ExistsRequest{Key: key2}) s.NoError(err) s.True(exists2.Exists) - exists3, err := c.Exists(nil, &blobstore.ExistsRequest{Key: key3}) + exists3, err := c.Exists(ctx, &blobstore.ExistsRequest{Key: key3}) s.NoError(err) s.True(exists3.Exists) // delete a blob and confirm no longer can get and that no longer exists - _, err = c.Delete(nil, &blobstore.DeleteRequest{Key: key1}) + _, err = c.Delete(ctx, &blobstore.DeleteRequest{Key: key1}) s.NoError(err) - exists1, err = c.Exists(nil, &blobstore.ExistsRequest{Key: key1}) + exists1, err = c.Exists(ctx, &blobstore.ExistsRequest{Key: key1}) s.NoError(err) s.False(exists1.Exists) - get1, err = c.Get(nil, &blobstore.GetRequest{Key: key1}) + get1, err = c.Get(ctx, &blobstore.GetRequest{Key: key1}) s.Error(err) s.Nil(get1) } diff --git a/common/collection/concurrent_tx_map_test.go b/common/collection/concurrent_tx_map_test.go index 09400517024..2cceb62e8ca 100644 --- a/common/collection/concurrent_tx_map_test.go +++ b/common/collection/concurrent_tx_map_test.go @@ -138,8 +138,7 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { func (s *ConcurrentTxMapSuite) TestRemoveIf() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) key := uuid.New() - var value intType - value = intType(1) + value := intType(1) testMap.Put(key, &value) removed := testMap.RemoveIf(key, func(key interface{}, value interface{}) bool { diff --git a/common/config/metrics.go b/common/config/metrics.go index 2ac622b3da2..75a1035b36b 100644 --- a/common/config/metrics.go +++ b/common/config/metrics.go @@ -116,7 
+116,13 @@ func (c *Metrics) newStatsdScope(logger log.Logger) tally.Scope { if len(config.HostPort) == 0 { return tally.NoopScope } - statter, err := statsd.NewBufferedClient(config.HostPort, config.Prefix, config.FlushInterval, config.FlushBytes) + statter, err := statsd.NewClientWithConfig(&statsd.ClientConfig{ + Address: config.HostPort, + Prefix: config.Prefix, + UseBuffered: true, + FlushInterval: config.FlushInterval, + FlushBytes: config.FlushBytes, + }) if err != nil { logger.Fatal("error creating statsd client", tag.Error(err)) } diff --git a/common/dynamicconfig/config.go b/common/dynamicconfig/config.go index c175dbc9543..cbc6b496b8d 100644 --- a/common/dynamicconfig/config.go +++ b/common/dynamicconfig/config.go @@ -421,7 +421,6 @@ func (c *Collection) GetDurationPropertyFilteredByShardID(key Key, defaultValue func (c *Collection) GetBoolProperty(key Key, defaultValue bool) BoolPropertyFn { return func(opts ...FilterOption) bool { filters := c.toFilterMap(opts...) - opts = append(opts, c.filterOptions...) val, err := c.client.GetBoolValue( key, filters, diff --git a/common/dynamicconfig/configstore/config_store_client.go b/common/dynamicconfig/configstore/config_store_client.go index 934978db8c0..a53b6f7ecaf 100644 --- a/common/dynamicconfig/configstore/config_store_client.go +++ b/common/dynamicconfig/configstore/config_store_client.go @@ -67,11 +67,6 @@ type cacheEntry struct { dcEntries map[string]*types.DynamicConfigEntry } -type fetchResult struct { - snapshot *persistence.DynamicConfigSnapshot - err error -} - // NewConfigStoreClient creates a config store client func NewConfigStoreClient(clientCfg *csc.ClientConfig, persistenceCfg *config.Persistence, logger log.Logger, doneCh chan struct{}) (dc.Client, error) { if err := validateClientConfig(clientCfg); err != nil { diff --git a/common/elasticsearch/client_v6.go b/common/elasticsearch/client_v6.go index e791675db60..4445861be8f 100644 --- a/common/elasticsearch/client_v6.go +++ b/common/elasticsearch/client_v6.go @@ -81,10 +81,7 @@ type ( ) func (c *elasticV6) IsNotFoundError(err error) bool { - if elastic.IsNotFound(err) { - return true - } - return false + return elastic.IsNotFound(err) } // NewV6Client returns a new implementation of GenericClient diff --git a/common/elasticsearch/client_v7.go b/common/elasticsearch/client_v7.go index dd4158e9856..5b0b04dab59 100644 --- a/common/elasticsearch/client_v7.go +++ b/common/elasticsearch/client_v7.go @@ -155,10 +155,7 @@ func buildSigningHTTPClientFromEnvironmentCredentialV7(credentialConfig config.A } func (c *elasticV7) IsNotFoundError(err error) bool { - if elastic.IsNotFound(err) { - return true - } - return false + return elastic.IsNotFound(err) } // root is for nested object like Attr property for search attributes. 
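Several hunks above (client_v6.go, client_v7.go, and later dynamodb/db.go and executionManagerTest.go) apply the same simplification: an `if cond { return true } return false` body is collapsed into a direct `return cond`. A minimal standalone sketch of that pattern follows; the names (errNotFound, isNotFoundVerbose, isNotFound) are illustrative and not taken from the Cadence codebase.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// Before: the condition already evaluates to a bool, so the if/else adds nothing.
func isNotFoundVerbose(err error) bool {
	if errors.Is(err, errNotFound) {
		return true
	}
	return false
}

// After: return the boolean expression directly, as the diff does for
// elastic.IsNotFound(err) and for err == errConditionFailed.
func isNotFound(err error) bool {
	return errors.Is(err, errNotFound)
}

func main() {
	fmt.Println(isNotFoundVerbose(errNotFound), isNotFound(errNotFound)) // true true
}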
diff --git a/common/elasticsearch/esql/globals.go b/common/elasticsearch/esql/globals.go index a0f1a0b15be..1a416ba7d55 100644 --- a/common/elasticsearch/esql/globals.go +++ b/common/elasticsearch/esql/globals.go @@ -21,8 +21,6 @@ package esql import ( - "fmt" - "github.com/xwb1989/sqlparser" ) @@ -85,11 +83,3 @@ const ( TieBreakerOrder = "desc" StartTimeOrder = "desc" ) - -// DEBUG usage -//nolint -func print(v interface{}) { - fmt.Println("==============") - fmt.Println(v) - fmt.Println("==============") -} diff --git a/common/elasticsearch/esql/select.go b/common/elasticsearch/esql/select.go index 4f04e6171af..95ab8aa0f3d 100644 --- a/common/elasticsearch/esql/select.go +++ b/common/elasticsearch/esql/select.go @@ -219,25 +219,25 @@ func (e *ESql) convertParenExpr(expr sqlparser.Expr, parent sqlparser.Expr) (str func (e *ESql) convertNotExpr(expr sqlparser.Expr, parent sqlparser.Expr) (string, error) { notExpr := expr.(*sqlparser.NotExpr) exprInside := notExpr.Expr - switch (exprInside).(type) { + switch exprInside := (exprInside).(type) { case *sqlparser.NotExpr: - expr1 := exprInside.(*sqlparser.NotExpr) + expr1 := exprInside expr2 := expr1.Expr return e.convertWhereExpr(expr2, parent) case *sqlparser.AndExpr: - expr1 := exprInside.(*sqlparser.AndExpr) + expr1 := exprInside var exprLeft sqlparser.Expr = &sqlparser.NotExpr{Expr: expr1.Left} var exprRight sqlparser.Expr = &sqlparser.NotExpr{Expr: expr1.Right} var expr2 sqlparser.Expr = &sqlparser.OrExpr{Left: exprLeft, Right: exprRight} return e.convertOrExpr(expr2, parent) case *sqlparser.OrExpr: - expr1 := exprInside.(*sqlparser.OrExpr) + expr1 := exprInside var exprLeft sqlparser.Expr = &sqlparser.NotExpr{Expr: expr1.Left} var exprRight sqlparser.Expr = &sqlparser.NotExpr{Expr: expr1.Right} var expr2 sqlparser.Expr = &sqlparser.AndExpr{Left: exprLeft, Right: exprRight} return e.convertAndExpr(expr2, parent) case *sqlparser.ParenExpr: - expr1 := exprInside.(*sqlparser.ParenExpr) + expr1 := exprInside exprBody := expr1.Expr var expr2 sqlparser.Expr = &sqlparser.NotExpr{Expr: exprBody} return e.convertNotExpr(expr2, parent) diff --git a/common/log/loggerimpl/logger_test.go b/common/log/loggerimpl/logger_test.go index e6734088da8..73f1edf1461 100644 --- a/common/log/loggerimpl/logger_test.go +++ b/common/log/loggerimpl/logger_test.go @@ -49,8 +49,7 @@ func TestDefaultLogger(t *testing.T) { outC <- buf.String() }() - var zapLogger *zap.Logger - zapLogger = zap.NewExample() + zapLogger := zap.NewExample() logger := NewLogger(zapLogger) preCaller := caller(1) @@ -82,8 +81,7 @@ func TestThrottleLogger(t *testing.T) { outC <- buf.String() }() - var zapLogger *zap.Logger - zapLogger = zap.NewExample() + zapLogger := zap.NewExample() dc := dynamicconfig.NewNopClient() cln := dynamicconfig.NewCollection(dc, NewNopLogger()) @@ -116,8 +114,7 @@ func TestEmptyMsg(t *testing.T) { outC <- buf.String() }() - var zapLogger *zap.Logger - zapLogger = zap.NewExample() + zapLogger := zap.NewExample() logger := NewLogger(zapLogger) preCaller := caller(1) diff --git a/common/membership/hashring_test.go b/common/membership/hashring_test.go index 8ee4b4f70ac..61f3cdf4883 100644 --- a/common/membership/hashring_test.go +++ b/common/membership/hashring_test.go @@ -47,7 +47,7 @@ func randSeq(n int) string { } func randomHostInfo(n int) []HostInfo { - res := make([]HostInfo, n, n) + res := make([]HostInfo, n) for i := 0; i < n; i++ { res = append(res, NewHostInfo(randSeq(5))) } diff --git a/common/metrics/nop.go b/common/metrics/nop.go index 
67cef9f3543..b242017d29a 100644 --- a/common/metrics/nop.go +++ b/common/metrics/nop.go @@ -39,11 +39,9 @@ func NopStopwatch() tally.Stopwatch { type noopClientImpl struct{} func (n noopClientImpl) IncCounter(scope int, counter int) { - return } func (n noopClientImpl) AddCounter(scope int, counter int, delta int64) { - return } func (n noopClientImpl) StartTimer(scope int, timer int) tally.Stopwatch { @@ -51,15 +49,12 @@ func (n noopClientImpl) StartTimer(scope int, timer int) tally.Stopwatch { } func (n noopClientImpl) RecordTimer(scope int, timer int, d time.Duration) { - return } func (m *noopClientImpl) RecordHistogramDuration(scope int, timer int, d time.Duration) { - return } func (n noopClientImpl) UpdateGauge(scope int, gauge int, value float64) { - return } func (n noopClientImpl) Scope(scope int, tags ...Tag) Scope { diff --git a/common/peerprovider/ringpopprovider/config_test.go b/common/peerprovider/ringpopprovider/config_test.go index a823e053455..3e217f5501f 100644 --- a/common/peerprovider/ringpopprovider/config_test.go +++ b/common/peerprovider/ringpopprovider/config_test.go @@ -105,8 +105,7 @@ func (resolver *mockResolver) LookupSRV(ctx context.Context, service string, pro } for _, record := range srvs { - var srvRecord net.SRV - srvRecord = record + srvRecord := record records = append(records, &srvRecord) } diff --git a/common/persistence/client/bean.go b/common/persistence/client/bean.go index 1065155debb..e5052f65ba2 100644 --- a/common/persistence/client/bean.go +++ b/common/persistence/client/bean.go @@ -26,7 +26,6 @@ import ( "sync" "github.com/uber/cadence/common/config" - cconfig "github.com/uber/cadence/common/config" es "github.com/uber/cadence/common/elasticsearch" "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/metrics" @@ -128,7 +127,7 @@ func NewBeanFromFactory( var configStoreMgr persistence.ConfigStoreManager if datastore, ok := params.PersistenceConfig.DataStores[params.PersistenceConfig.DefaultStore]; ok { - if datastore.NoSQL != nil && datastore.NoSQL.PluginName == cconfig.StoreTypeCassandra { + if datastore.NoSQL != nil && datastore.NoSQL.PluginName == config.StoreTypeCassandra { configStoreMgr, err = factory.NewConfigStoreManager() if err != nil { return nil, err diff --git a/common/persistence/dataStoreInterfaces.go b/common/persistence/dataStoreInterfaces.go index 4eeffa7fb48..b1299f7629c 100644 --- a/common/persistence/dataStoreInterfaces.go +++ b/common/persistence/dataStoreInterfaces.go @@ -321,7 +321,7 @@ type ( NonRetriableErrors []string BranchToken []byte CronSchedule string - ExpirationSeconds time.Duration + ExpirationInterval time.Duration Memo map[string][]byte SearchAttributes map[string][]byte @@ -716,7 +716,7 @@ type ( CloseTimestamp time.Time Status types.WorkflowExecutionCloseStatus HistoryLength int64 - RetentionSeconds time.Duration + RetentionPeriod time.Duration IsCron bool NumClusters int16 } @@ -887,7 +887,7 @@ type ( // NewDataBlob returns a new DataBlob func NewDataBlob(data []byte, encodingType common.EncodingType) *DataBlob { - if data == nil || len(data) == 0 { + if len(data) == 0 { return nil } if encodingType != "thriftrw" && data[0] == 'Y' { diff --git a/common/persistence/elasticsearch/esVisibilityStore_test.go b/common/persistence/elasticsearch/esVisibilityStore_test.go index ca311ba40dc..0308f7fd2ee 100644 --- a/common/persistence/elasticsearch/esVisibilityStore_test.go +++ b/common/persistence/elasticsearch/esVisibilityStore_test.go @@ -68,7 +68,6 @@ var ( testLatestTime = 
int64(2547596872371000000) testWorkflowType = "test-wf-type" testWorkflowID = "test-wid" - testRunID = "1601da05-4db9-4eeb-89e4-da99481bdfc9" testCloseStatus = int32(1) testRequest = &p.InternalListWorkflowExecutionsRequest{ @@ -83,13 +82,6 @@ var ( testContextTimeout = 5 * time.Second - filterOpen = "must_not:map[exists:map[field:CloseStatus]]" - filterClose = "map[exists:map[field:CloseStatus]]" - filterByType = fmt.Sprintf("map[match:map[WorkflowType:map[query:%s]]]", testWorkflowType) - filterByWID = fmt.Sprintf("map[match:map[WorkflowID:map[query:%s]]]", testWorkflowID) - filterByRunID = fmt.Sprintf("map[match:map[RunID:map[query:%s]]]", testRunID) - filterByStatus = fmt.Sprintf("map[match:map[CloseStatus:map[query:%v]]]", testCloseStatus) - esIndexMaxResultWindow = 3 ) @@ -632,11 +624,11 @@ func (s *ESVisibilitySuite) TestGetESQueryDSL() { s.Equal(`{"query":{"bool":{"must":[{"match_phrase":{"DomainID":{"query":"bfd5c907-f899-4baf-a7b2-2ab85e623ebd"}}},{"bool":{"must":[{"match_all":{}}]}}]}},"from":0,"size":10,"sort":[{"ExecutionTime":"desc"},{"RunID":"desc"}]}`, dsl) request.Query = `order by StartTime desc, CloseTime desc` - dsl, err = v.getESQueryDSL(request, token) + _, err = v.getESQueryDSL(request, token) s.Equal(errors.New("only one field can be used to sort"), err) request.Query = `order by CustomStringField desc` - dsl, err = v.getESQueryDSL(request, token) + _, err = v.getESQueryDSL(request, token) s.Equal(errors.New("not able to sort by IndexedValueTypeString field, use IndexedValueTypeKeyword field"), err) request.Query = `order by CustomIntField asc` diff --git a/common/persistence/executionManager.go b/common/persistence/executionManager.go index 8ac1ecc69f3..5bfd900b472 100644 --- a/common/persistence/executionManager.go +++ b/common/persistence/executionManager.go @@ -182,7 +182,7 @@ func (m *executionManagerImpl) DeserializeExecutionInfo( NonRetriableErrors: info.NonRetriableErrors, BranchToken: info.BranchToken, CronSchedule: info.CronSchedule, - ExpirationSeconds: int32(info.ExpirationSeconds.Seconds()), + ExpirationSeconds: int32(info.ExpirationInterval.Seconds()), AutoResetPoints: autoResetPoints, SearchAttributes: info.SearchAttributes, Memo: info.Memo, @@ -212,7 +212,7 @@ func (m *executionManagerImpl) DeserializeChildExecutionInfos( infos map[int64]*InternalChildExecutionInfo, ) (map[int64]*ChildExecutionInfo, error) { - newInfos := make(map[int64]*ChildExecutionInfo, 0) + newInfos := make(map[int64]*ChildExecutionInfo) for k, v := range infos { initiatedEvent, err := m.serializer.DeserializeEvent(v.InitiatedEvent) if err != nil { @@ -259,7 +259,7 @@ func (m *executionManagerImpl) DeserializeActivityInfos( infos map[int64]*InternalActivityInfo, ) (map[int64]*ActivityInfo, error) { - newInfos := make(map[int64]*ActivityInfo, 0) + newInfos := make(map[int64]*ActivityInfo) for k, v := range infos { scheduledEvent, err := m.serializer.DeserializeEvent(v.ScheduledEvent) if err != nil { @@ -507,7 +507,7 @@ func (m *executionManagerImpl) SerializeExecutionInfo( NonRetriableErrors: info.NonRetriableErrors, BranchToken: info.BranchToken, CronSchedule: info.CronSchedule, - ExpirationSeconds: common.SecondsToDuration(int64(info.ExpirationSeconds)), + ExpirationInterval: common.SecondsToDuration(int64(info.ExpirationSeconds)), Memo: info.Memo, SearchAttributes: info.SearchAttributes, @@ -790,7 +790,7 @@ func (m *executionManagerImpl) ListConcreteExecutions( return nil, err } newResponse := &ListConcreteExecutionsResponse{ - Executions: make([]*ListConcreteExecutionsEntity, 
len(response.Executions), len(response.Executions)), + Executions: make([]*ListConcreteExecutionsEntity, len(response.Executions)), PageToken: response.NextPageToken, } for i, e := range response.Executions { @@ -967,7 +967,7 @@ func (m *executionManagerImpl) fromInternalReplicationTaskInfos(internalInfos [] if internalInfos == nil { return nil } - infos := make([]*ReplicationTaskInfo, len(internalInfos), len(internalInfos)) + infos := make([]*ReplicationTaskInfo, len(internalInfos)) for i := 0; i < len(internalInfos); i++ { infos[i] = m.fromInternalReplicationTaskInfo(internalInfos[i]) } @@ -994,17 +994,6 @@ func (m *executionManagerImpl) fromInternalReplicationTaskInfo(internalInfo *Int } } -func (m *executionManagerImpl) toInternalReplicationTaskInfos(infos []*ReplicationTaskInfo) []*InternalReplicationTaskInfo { - if infos == nil { - return nil - } - internalInfos := make([]*InternalReplicationTaskInfo, len(infos), len(infos)) - for i := 0; i < len(infos); i++ { - internalInfos[i] = m.toInternalReplicationTaskInfo(infos[i]) - } - return internalInfos -} - func (m *executionManagerImpl) toInternalReplicationTaskInfo(info *ReplicationTaskInfo) *InternalReplicationTaskInfo { if info == nil { return nil diff --git a/common/persistence/historyManager.go b/common/persistence/historyManager.go index d141bff61ab..899b02ef499 100644 --- a/common/persistence/historyManager.go +++ b/common/persistence/historyManager.go @@ -84,7 +84,7 @@ func (m *historyV2ManagerImpl) ForkHistoryBranch( if request.ForkNodeID <= 1 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("ForkNodeID must be > 1"), + Msg: "ForkNodeID must be > 1", } } @@ -194,7 +194,7 @@ func (m *historyV2ManagerImpl) AppendHistoryNodes( } if len(request.Events) == 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("events to be appended cannot be empty"), + Msg: "events to be appended cannot be empty", } } version := request.Events[0].Version @@ -203,18 +203,18 @@ func (m *historyV2ManagerImpl) AppendHistoryNodes( if nodeID <= 0 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("eventID cannot be less than 1"), + Msg: "eventID cannot be less than 1", } } for _, e := range request.Events { if e.Version != version { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("event version must be the same inside a batch"), + Msg: "event version must be the same inside a batch", } } if e.EventID != lastID+1 { return nil, &InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("event ID must be continuous"), + Msg: "event ID must be continuous", } } lastID++ @@ -385,7 +385,7 @@ func (m *historyV2ManagerImpl) readRawHistoryBranch( if token.CurrentRangeIndex == notStartedIndex { return nil, nil, 0, nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("branchRange is corrupted"), + Message: "branchRange is corrupted", } } } @@ -464,7 +464,7 @@ func (m *historyV2ManagerImpl) readHistoryBranch( if len(events) == 0 { logger.Error("Empty events in a batch") return nil, nil, nil, 0, 0, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted history event batch, empty events"), + Message: "corrupted history event batch, empty events", } } @@ -479,7 +479,7 @@ func (m *historyV2ManagerImpl) readHistoryBranch( tag.LastEventVersion(lastEvent.GetVersion()), tag.WorkflowNextEventID(lastEvent.GetEventID()), tag.Counter(eventCount)) return nil, nil, nil, 0, 0, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted history event batch, wrong version and IDs"), + 
Message: "corrupted history event batch, wrong version and IDs", } } @@ -505,7 +505,7 @@ func (m *historyV2ManagerImpl) readHistoryBranch( tag.TokenLastEventVersion(token.LastEventVersion), tag.TokenLastEventID(token.LastEventID), tag.Counter(eventCount)) return nil, nil, nil, 0, 0, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted history event batch, eventID is not continouous"), + Message: "corrupted history event batch, eventID is not continouous", } } } diff --git a/common/persistence/nosql/nosqlConfigStore.go b/common/persistence/nosql/nosqlConfigStore.go index 040aae0a6e1..7d87acc8540 100644 --- a/common/persistence/nosql/nosqlConfigStore.go +++ b/common/persistence/nosql/nosqlConfigStore.go @@ -27,7 +27,6 @@ import ( "github.com/uber/cadence/common/config" "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/persistence" - p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/persistence/nosql/nosqlplugin" ) @@ -40,7 +39,7 @@ type ( func NewNoSQLConfigStore( cfg config.NoSQL, logger log.Logger, -) (p.ConfigStore, error) { +) (persistence.ConfigStore, error) { db, err := NewNoSQLDB(&cfg, logger) if err != nil { return nil, err @@ -54,7 +53,7 @@ func NewNoSQLConfigStore( }, nil } -func (m *nosqlConfigStore) FetchConfig(ctx context.Context, configType p.ConfigType) (*p.InternalConfigStoreEntry, error) { +func (m *nosqlConfigStore) FetchConfig(ctx context.Context, configType persistence.ConfigType) (*persistence.InternalConfigStoreEntry, error) { entry, err := m.db.SelectLatestConfig(ctx, int(configType)) if err != nil { return nil, convertCommonErrors(m.db, "FetchConfig", err) @@ -62,7 +61,7 @@ func (m *nosqlConfigStore) FetchConfig(ctx context.Context, configType p.ConfigT return entry, nil } -func (m *nosqlConfigStore) UpdateConfig(ctx context.Context, value *p.InternalConfigStoreEntry) error { +func (m *nosqlConfigStore) UpdateConfig(ctx context.Context, value *persistence.InternalConfigStoreEntry) error { err := m.db.InsertConfig(ctx, value) if err != nil { if _, ok := err.(*nosqlplugin.ConditionFailure); ok { diff --git a/common/persistence/nosql/nosqlExecutionStore.go b/common/persistence/nosql/nosqlExecutionStore.go index 61d63edce48..3bec7df9be0 100644 --- a/common/persistence/nosql/nosqlExecutionStore.go +++ b/common/persistence/nosql/nosqlExecutionStore.go @@ -238,7 +238,7 @@ func (d *nosqlExecutionStore) UpdateWorkflowExecution( if domainID != newDomainID { return &types.InternalServiceError{ - Message: fmt.Sprintf("UpdateWorkflowExecution: cannot continue as new to another domain"), + Message: "UpdateWorkflowExecution: cannot continue as new to another domain", } } @@ -309,6 +309,9 @@ func (d *nosqlExecutionStore) UpdateWorkflowExecution( // 2. new if newWorkflow != nil { insertExecution, err = d.prepareCreateWorkflowExecutionRequestWithMaps(newWorkflow) + if err != nil { + return err + } nosqlTransferTasks, nosqlCrossClusterTasks, nosqlReplicationTasks, nosqlTimerTasks, err = d.prepareNoSQLTasksForWorkflowTxn( domainID, workflowID, newWorkflow.ExecutionInfo.RunID, @@ -446,6 +449,9 @@ func (d *nosqlExecutionStore) ConflictResolveWorkflowExecution( // 3. 
new if newWorkflow != nil { insertExecution, err = d.prepareCreateWorkflowExecutionRequestWithMaps(newWorkflow) + if err != nil { + return err + } nosqlTransferTasks, nosqlCrossClusterTasks, nosqlReplicationTasks, nosqlTimerTasks, err = d.prepareNoSQLTasksForWorkflowTxn( domainID, workflowID, newWorkflow.ExecutionInfo.RunID, diff --git a/common/persistence/nosql/nosqlHistoryStore.go b/common/persistence/nosql/nosqlHistoryStore.go index b90335decb9..39031b75396 100644 --- a/common/persistence/nosql/nosqlHistoryStore.go +++ b/common/persistence/nosql/nosqlHistoryStore.go @@ -22,7 +22,6 @@ package nosql import ( "context" - "fmt" "time" "github.com/uber/cadence/common" @@ -84,7 +83,7 @@ func (h *nosqlHistoryStore) AppendHistoryNodes( if request.NodeID < beginNodeID { return &p.InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot append to ancestors' nodes"), + Msg: "cannot append to ancestors' nodes", } } @@ -92,9 +91,7 @@ func (h *nosqlHistoryStore) AppendHistoryNodes( var treeRow *nosqlplugin.HistoryTreeRow if request.IsNewBranch { var ancestors []*types.HistoryBranchRange - for _, anc := range branchInfo.Ancestors { - ancestors = append(ancestors, anc) - } + ancestors = append(ancestors, branchInfo.Ancestors...) treeRow = &nosqlplugin.HistoryTreeRow{ ShardID: request.ShardID, TreeID: branchInfo.TreeID, @@ -171,11 +168,11 @@ func (h *nosqlHistoryStore) ReadHistoryBranch( switch { case nodeID < lastNodeID: return nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted data, nodeID cannot decrease"), + Message: "corrupted data, nodeID cannot decrease", } case nodeID == lastNodeID: return nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted data, same nodeID must have smaller txnID"), + Message: "corrupted data, same nodeID must have smaller txnID", } default: // row.NodeID > lastNodeID: // NOTE: when row.nodeID > lastNodeID, we expect the one with largest txnID comes first diff --git a/common/persistence/nosql/nosqlQueueStore.go b/common/persistence/nosql/nosqlQueueStore.go index 4c14bd65722..4c8086754c2 100644 --- a/common/persistence/nosql/nosqlQueueStore.go +++ b/common/persistence/nosql/nosqlQueueStore.go @@ -331,7 +331,7 @@ func (q *nosqlQueueStore) updateQueueMetadata( if err != nil { if _, ok := err.(*nosqlplugin.ConditionFailure); ok { return &types.InternalServiceError{ - Message: fmt.Sprintf("UpdateQueueMetadata operation encounter concurrent write."), + Message: "UpdateQueueMetadata operation encounter concurrent write.", } } diff --git a/common/persistence/nosql/nosqlTaskStore.go b/common/persistence/nosql/nosqlTaskStore.go index 1dc9bb1c025..29aa2c291a7 100644 --- a/common/persistence/nosql/nosqlTaskStore.go +++ b/common/persistence/nosql/nosqlTaskStore.go @@ -80,7 +80,7 @@ func (t *nosqlTaskStore) LeaseTaskList( ) (*p.LeaseTaskListResponse, error) { if len(request.TaskList) == 0 { return nil, &types.InternalServiceError{ - Message: fmt.Sprintf("LeaseTaskList requires non empty task list"), + Message: "LeaseTaskList requires non empty task list", } } now := time.Now() @@ -194,7 +194,7 @@ func (t *nosqlTaskStore) ListTaskList( _ *p.ListTaskListRequest, ) (*p.ListTaskListResponse, error) { return nil, &types.InternalServiceError{ - Message: fmt.Sprintf("unsupported operation"), + Message: "unsupported operation", } } diff --git a/common/persistence/nosql/nosqlVisibilityStore.go b/common/persistence/nosql/nosqlVisibilityStore.go index 02cd226cd30..2c873e82285 100644 --- a/common/persistence/nosql/nosqlVisibilityStore.go +++ 
b/common/persistence/nosql/nosqlVisibilityStore.go @@ -97,7 +97,7 @@ func (v *nosqlVisibilityStore) RecordWorkflowExecutionClosed( request *p.InternalRecordWorkflowExecutionClosedRequest, ) error { // Find how long to keep the row - retention := request.RetentionSeconds + retention := request.RetentionPeriod if retention == 0 { retention = defaultCloseTTLSeconds * time.Second } diff --git a/common/persistence/nosql/nosqlplugin/cassandra/domain.go b/common/persistence/nosql/nosqlplugin/cassandra/domain.go index f17bb888c1c..dcef4ff2dee 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/domain.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/domain.go @@ -427,8 +427,7 @@ func (db *cdb) SelectAllDomains( pageSize int, pageToken []byte, ) ([]*nosqlplugin.DomainRow, []byte, error) { - var query gocql.Query - query = db.session.Query(templateListDomainQueryV2, constDomainPartition).WithContext(ctx) + query := db.session.Query(templateListDomainQueryV2, constDomainPartition).WithContext(ctx) iter := query.PageSize(pageSize).PageState(pageToken).Iter() if iter == nil { return nil, nil, &types.InternalServiceError{ diff --git a/common/persistence/nosql/nosqlplugin/cassandra/tasks.go b/common/persistence/nosql/nosqlplugin/cassandra/tasks.go index eac60ff8b0b..49aeb785c39 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/tasks.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/tasks.go @@ -79,13 +79,6 @@ const ( `and task_id > ? ` + `and task_id <= ?` - templateCompleteTaskQuery = `DELETE FROM tasks ` + - `WHERE domain_id = ? ` + - `and task_list_name = ? ` + - `and task_list_type = ? ` + - `and type = ? ` + - `and task_id = ?` - templateCompleteTasksLessThanQuery = `DELETE FROM tasks ` + `WHERE domain_id = ? ` + `AND task_list_name = ? 
` + @@ -307,7 +300,7 @@ func (db *cdb) UpdateTaskListWithTTL( // Noop if TTL is already implemented in other methods func (db *cdb) ListTaskList(ctx context.Context, pageSize int, nextPageToken []byte) (*nosqlplugin.ListTaskListResult, error) { return nil, &types.InternalServiceError{ - Message: fmt.Sprintf("unsupported operation"), + Message: "unsupported operation", } } diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflowParsingUtils.go b/common/persistence/nosql/nosqlplugin/cassandra/workflowParsingUtils.go index d39d2037b99..f5a476678ba 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflowParsingUtils.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflowParsingUtils.go @@ -163,7 +163,7 @@ func parseWorkflowExecutionInfo( case "cron_schedule": info.CronSchedule = v.(string) case "expiration_seconds": - info.ExpirationSeconds = common.SecondsToDuration(int64(v.(int))) + info.ExpirationInterval = common.SecondsToDuration(int64(v.(int))) case "search_attributes": info.SearchAttributes = v.(map[string][]byte) case "memo": diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflowUtils.go b/common/persistence/nosql/nosqlplugin/cassandra/workflowUtils.go index 9cc93897f49..834804f5907 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflowUtils.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflowUtils.go @@ -1262,7 +1262,7 @@ func (db *cdb) updateWorkflowExecution( persistence.EventStoreVersion, execution.BranchToken, execution.CronSchedule, - int32(execution.ExpirationSeconds.Seconds()), + int32(execution.ExpirationInterval.Seconds()), execution.SearchAttributes, execution.Memo, execution.NextEventID, @@ -1356,7 +1356,7 @@ func (db *cdb) createWorkflowExecution( persistence.EventStoreVersion, execution.BranchToken, execution.CronSchedule, - int32(execution.ExpirationSeconds.Seconds()), + int32(execution.ExpirationInterval.Seconds()), execution.SearchAttributes, execution.Memo, execution.NextEventID, diff --git a/common/persistence/nosql/nosqlplugin/dynamodb/db.go b/common/persistence/nosql/nosqlplugin/dynamodb/db.go index fba26afc089..8a572d24ddf 100644 --- a/common/persistence/nosql/nosqlplugin/dynamodb/db.go +++ b/common/persistence/nosql/nosqlplugin/dynamodb/db.go @@ -40,7 +40,6 @@ var ( // ddb represents a logical connection to DynamoDB database type ddb struct { - logger log.Logger } var _ nosqlplugin.DB = (*ddb)(nil) @@ -71,8 +70,5 @@ func (db *ddb) IsThrottlingError(err error) bool { } func (db *ddb) IsConditionFailedError(err error) bool { - if err == errConditionFailed { - return true - } - return false + return err == errConditionFailed } diff --git a/common/persistence/nosql/nosqlplugin/dynamodb/domain.go b/common/persistence/nosql/nosqlplugin/dynamodb/domain.go index 70bf6480e58..3bf569e8745 100644 --- a/common/persistence/nosql/nosqlplugin/dynamodb/domain.go +++ b/common/persistence/nosql/nosqlplugin/dynamodb/domain.go @@ -24,7 +24,6 @@ import ( "context" "github.com/uber/cadence/common/persistence/nosql/nosqlplugin" - "github.com/uber/cadence/common/persistence/nosql/nosqlplugin/cassandra/gocql" ) // Insert a new record to domain, return error if failed or already exists @@ -36,14 +35,6 @@ func (db *ddb) InsertDomain( panic("TODO") } -func (db *ddb) updateMetadataBatch( - ctx context.Context, - batch gocql.Batch, - notificationVersion int64, -) { - panic("TODO") -} - // Update domain func (db *ddb) UpdateDomain( ctx context.Context, diff --git a/common/persistence/nosql/nosqlplugin/mongodb/domain.go 
b/common/persistence/nosql/nosqlplugin/mongodb/domain.go index cc0822be937..0f0a568ab94 100644 --- a/common/persistence/nosql/nosqlplugin/mongodb/domain.go +++ b/common/persistence/nosql/nosqlplugin/mongodb/domain.go @@ -24,7 +24,6 @@ import ( "context" "github.com/uber/cadence/common/persistence/nosql/nosqlplugin" - "github.com/uber/cadence/common/persistence/nosql/nosqlplugin/cassandra/gocql" ) // Insert a new record to domain, return error if failed or already exists @@ -36,14 +35,6 @@ func (db *mdb) InsertDomain( panic("TODO") } -func (db *mdb) updateMetadataBatch( - ctx context.Context, - batch gocql.Batch, - notificationVersion int64, -) { - panic("TODO") -} - // Update domain func (db *mdb) UpdateDomain( ctx context.Context, diff --git a/common/persistence/persistence-tests/executionManagerTest.go b/common/persistence/persistence-tests/executionManagerTest.go index 58f386d6b91..0c7118b8e60 100644 --- a/common/persistence/persistence-tests/executionManagerTest.go +++ b/common/persistence/persistence-tests/executionManagerTest.go @@ -756,6 +756,7 @@ func (s *ExecutionManagerSuite) TestUpdateWorkflowExecutionTasks() { s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -990,6 +991,7 @@ func (s *ExecutionManagerSuite) TestUpsertWorkflowActivity() { RangeID: s.ShardInfo.RangeID, Mode: p.UpdateWorkflowModeUpdateCurrent, }) + s.NoError(err) info3, err := s.GetWorkflowExecutionInfo(ctx, domainID, workflowExecution) s.Nil(err) s.Equal(1, len(info3.ActivityInfos)) @@ -2077,6 +2079,7 @@ func (s *ExecutionManagerSuite) TestCancelTransferTaskTasks() { s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -2184,6 +2187,7 @@ func (s *ExecutionManagerSuite) TestSignalTransferTaskTasks() { s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -2290,6 +2294,7 @@ func (s *ExecutionManagerSuite) TestReplicationTasks() { s.NoError(err) s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -3416,6 +3421,7 @@ func (s *ExecutionManagerSuite) TestReplicationTransferTaskTasks() { s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -3479,6 +3485,7 @@ func (s *ExecutionManagerSuite) TestReplicationTransferTaskRangeComplete() { s.NotNil(task0, "Expected non empty task identifier.") taskD, err := s.GetTransferTasks(ctx, 1, false) + s.NoError(err) s.Equal(1, len(taskD), "Expected 1 decision task.") err = s.CompleteTransferTask(ctx, taskD[0].TaskID) s.NoError(err) @@ -4230,6 +4237,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithCASMisma s.NoError(err) runID1, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) 
s.Equal(workflowExecutionCurrent.GetRunID(), runID1) state, err = s.GetWorkflowExecutionInfo(ctx, domainID, workflowExecutionCurrent) s.NoError(err) @@ -4249,6 +4257,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithCASMisma err3 := s.UpdateWorkflowExecutionAndFinish(ctx, currentInfo, currentStats, int64(3), versionHistories) s.NoError(err3) runID1, err = s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), runID1) resetExecutionInfo := &p.WorkflowExecutionInfo{ @@ -4327,6 +4336,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithCASMisma // this test only assert whether the current workflow execution record is reset runID, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), runID) } @@ -4388,6 +4398,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithTransact s.NoError(err) currentRunID, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), currentRunID) state, err = s.GetWorkflowExecutionInfo(ctx, domainID, workflowExecutionCurrent) s.NoError(err) @@ -4549,6 +4560,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithTransact s.NoError(err) currentRunID, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), currentRunID) state, err = s.GetWorkflowExecutionInfo(ctx, domainID, workflowExecutionCurrent) s.NoError(err) @@ -4990,6 +5002,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithTransact s.NoError(err) currentRunID, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), currentRunID) resetExecutionInfo := &p.WorkflowExecutionInfo{ @@ -5123,6 +5136,7 @@ func (s *ExecutionManagerSuite) TestConflictResolveWorkflowExecutionWithTransact s.NoError(err) currentRunID, err := s.GetCurrentWorkflowRunID(ctx, domainID, workflowID) + s.NoError(err) s.Equal(workflowExecutionCurrent.GetRunID(), currentRunID) decisionScheduleID := int64(111) @@ -5358,8 +5372,5 @@ func timestampConvertor(t time.Time) time.Time { func timeComparator(t1, t2 time.Time, timeTolerance time.Duration) bool { diff := t2.Sub(t1) - if diff.Nanoseconds() <= timeTolerance.Nanoseconds() { - return true - } - return false + return diff.Nanoseconds() <= timeTolerance.Nanoseconds() } diff --git a/common/persistence/persistence-tests/executionManagerTestForEventsV2.go b/common/persistence/persistence-tests/executionManagerTestForEventsV2.go index 00bbf1fc660..f0cbf8d1c61 100644 --- a/common/persistence/persistence-tests/executionManagerTestForEventsV2.go +++ b/common/persistence/persistence-tests/executionManagerTestForEventsV2.go @@ -406,77 +406,3 @@ func (s *ExecutionManagerSuiteForEventsV2) TestContinueAsNew() { s.NoError(err5) s.Equal(newWorkflowExecution.RunID, newRunID) } - -func (s *ExecutionManagerSuiteForEventsV2) createWorkflowExecution( - ctx context.Context, - domainID string, - workflowExecution types.WorkflowExecution, - taskList string, - wType string, - wTimeout int32, - decisionTimeout int32, - nextEventID int64, - lastProcessedEventID int64, - decisionScheduleID int64, - txTasks []p.Task, - brToken []byte, -) (*p.CreateWorkflowExecutionResponse, error) { - - var transferTasks []p.Task - var replicationTasks []p.Task - var timerTasks []p.Task - for _, task := range 
txTasks { - switch t := task.(type) { - case *p.DecisionTask, *p.ActivityTask, *p.CloseExecutionTask, *p.CancelExecutionTask, *p.StartChildExecutionTask, *p.SignalExecutionTask, *p.RecordWorkflowStartedTask: - transferTasks = append(transferTasks, t) - case *p.HistoryReplicationTask: - replicationTasks = append(replicationTasks, t) - case *p.WorkflowTimeoutTask, *p.DeleteHistoryEventTask: - timerTasks = append(timerTasks, t) - default: - panic("Unknown transfer task type.") - } - } - - transferTasks = append(transferTasks, &p.DecisionTask{ - TaskID: s.GetNextSequenceNumber(), - DomainID: domainID, - TaskList: taskList, - ScheduleID: decisionScheduleID, - }) - versionHistory := p.NewVersionHistory([]byte{}, []*p.VersionHistoryItem{ - {decisionScheduleID, common.EmptyVersion}, - }) - versionHistories := p.NewVersionHistories(versionHistory) - response, err := s.ExecutionManager.CreateWorkflowExecution(ctx, &p.CreateWorkflowExecutionRequest{ - NewWorkflowSnapshot: p.WorkflowSnapshot{ - ExecutionInfo: &p.WorkflowExecutionInfo{ - CreateRequestID: uuid.New(), - DomainID: domainID, - WorkflowID: workflowExecution.GetWorkflowID(), - RunID: workflowExecution.GetRunID(), - TaskList: taskList, - WorkflowTypeName: wType, - WorkflowTimeout: wTimeout, - DecisionStartToCloseTimeout: decisionTimeout, - State: p.WorkflowStateRunning, - CloseStatus: p.WorkflowCloseStatusNone, - NextEventID: nextEventID, - LastProcessedEvent: lastProcessedEventID, - DecisionScheduleID: decisionScheduleID, - DecisionStartedID: common.EmptyEventID, - DecisionTimeout: 1, - BranchToken: brToken, - }, - ExecutionStats: &p.ExecutionStats{}, - TimerTasks: timerTasks, - TransferTasks: transferTasks, - ReplicationTasks: replicationTasks, - Checksum: testWorkflowChecksum, - VersionHistories: versionHistories, - }, - RangeID: s.ShardInfo.RangeID, - }) - - return response, err -} diff --git a/common/persistence/persistence-tests/historyV2PersistenceTest.go b/common/persistence/persistence-tests/historyV2PersistenceTest.go index 860484a091f..ef023aea3f6 100644 --- a/common/persistence/persistence-tests/historyV2PersistenceTest.go +++ b/common/persistence/persistence-tests/historyV2PersistenceTest.go @@ -159,7 +159,7 @@ func (s *HistoryV2PersistenceSuite) TestScanAllTrees() { }) s.Nil(err) for _, br := range resp.Branches { - if trees[br.TreeID] == true { + if trees[br.TreeID] { delete(trees, br.TreeID) s.True(br.ForkTime.UnixNano() > 0) @@ -755,8 +755,7 @@ func (s *HistoryV2PersistenceSuite) deleteHistoryBranch(ctx context.Context, bra } op := func() error { - var err error - err = s.HistoryV2Mgr.DeleteHistoryBranch(ctx, &p.DeleteHistoryBranchRequest{ + err := s.HistoryV2Mgr.DeleteHistoryBranch(ctx, &p.DeleteHistoryBranchRequest{ BranchToken: branchToken, ShardID: common.IntPtr(s.ShardInfo.ShardID), }) diff --git a/common/persistence/persistence-tests/matchingPersistenceTest.go b/common/persistence/persistence-tests/matchingPersistenceTest.go index a2da227b9a6..4df15a3914a 100644 --- a/common/persistence/persistence-tests/matchingPersistenceTest.go +++ b/common/persistence/persistence-tests/matchingPersistenceTest.go @@ -331,7 +331,7 @@ func (s *MatchingPersistenceSuite) TestLeaseAndUpdateTaskList() { s.EqualValues(0, tli.AckLevel) s.True(tli.LastUpdated.After(leaseTime) || tli.LastUpdated.Equal(leaseTime)) - response, err = s.TaskMgr.LeaseTaskList(ctx, &p.LeaseTaskListRequest{ + _, err = s.TaskMgr.LeaseTaskList(ctx, &p.LeaseTaskListRequest{ DomainID: domainID, TaskList: taskList, TaskType: p.TaskListTypeActivity, @@ -570,7 +570,7 @@ 
func (s *MatchingPersistenceSuite) TestGetOrphanTasks() { existingOrphans := len(oresp.Tasks) domainID := uuid.New() - name := fmt.Sprintf("test-list-with-orphans") + name := "test-list-with-orphans" resp, err := s.TaskMgr.LeaseTaskList(ctx, &p.LeaseTaskListRequest{ DomainID: domainID, TaskList: name, diff --git a/common/persistence/persistence-tests/persistenceTestBase.go b/common/persistence/persistence-tests/persistenceTestBase.go index 2d44fb1e4b4..19d43132f79 100644 --- a/common/persistence/persistence-tests/persistenceTestBase.go +++ b/common/persistence/persistence-tests/persistenceTestBase.go @@ -42,7 +42,6 @@ import ( "github.com/uber/cadence/common/log/tag" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/persistence" - p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/persistence/client" "github.com/uber/cadence/common/persistence/nosql" "github.com/uber/cadence/common/persistence/persistence-tests/testcluster" @@ -74,21 +73,21 @@ type ( // TestBase wraps the base setup needed to create workflows over persistence layer. TestBase struct { suite.Suite - ShardMgr p.ShardManager + ShardMgr persistence.ShardManager ExecutionMgrFactory client.Factory - ExecutionManager p.ExecutionManager - TaskMgr p.TaskManager - HistoryV2Mgr p.HistoryManager - DomainManager p.DomainManager - DomainReplicationQueueMgr p.QueueManager - ShardInfo *p.ShardInfo + ExecutionManager persistence.ExecutionManager + TaskMgr persistence.TaskManager + HistoryV2Mgr persistence.HistoryManager + DomainManager persistence.DomainManager + DomainReplicationQueueMgr persistence.QueueManager + ShardInfo *persistence.ShardInfo TaskIDGenerator TransferTaskIDGenerator ClusterMetadata cluster.Metadata DefaultTestCluster testcluster.PersistenceTestCluster VisibilityTestCluster testcluster.PersistenceTestCluster Logger log.Logger - PayloadSerializer p.PayloadSerializer - ConfigStoreManager p.ConfigStoreManager + PayloadSerializer persistence.PayloadSerializer + ConfigStoreManager persistence.ConfigStoreManager } // TestBaseParams defines the input of TestBase @@ -118,7 +117,7 @@ func NewTestBaseFromParams(params TestBaseParams) TestBase { DefaultTestCluster: params.DefaultTestCluster, VisibilityTestCluster: params.VisibilityTestCluster, ClusterMetadata: params.ClusterMetadata, - PayloadSerializer: p.NewPayloadSerializer(), + PayloadSerializer: persistence.NewPayloadSerializer(), Logger: logger, } } @@ -232,7 +231,7 @@ func (s *TestBase) Setup() { } timerPQS := types.ProcessingQueueStates{StatesByCluster: timerPQSMap} - s.ShardInfo = &p.ShardInfo{ + s.ShardInfo = &persistence.ShardInfo{ ShardID: shardID, RangeID: 0, TransferAckLevel: 0, @@ -245,7 +244,7 @@ func (s *TestBase) Setup() { } s.TaskIDGenerator = &TestTransferTaskIDGenerator{} - err = s.ShardMgr.CreateShard(context.Background(), &p.CreateShardRequest{ShardInfo: s.ShardInfo}) + err = s.ShardMgr.CreateShard(context.Background(), &persistence.CreateShardRequest{ShardInfo: s.ShardInfo}) s.fatalOnError("CreateShard", err) queue, err := factory.NewDomainReplicationQueueManager() @@ -261,20 +260,20 @@ func (s *TestBase) fatalOnError(msg string, err error) { // CreateShard is a utility method to create the shard using persistence layer func (s *TestBase) CreateShard(ctx context.Context, shardID int, owner string, rangeID int64) error { - info := &p.ShardInfo{ + info := &persistence.ShardInfo{ ShardID: shardID, Owner: owner, RangeID: rangeID, } - return s.ShardMgr.CreateShard(ctx, &p.CreateShardRequest{ + return 
s.ShardMgr.CreateShard(ctx, &persistence.CreateShardRequest{ ShardInfo: info, }) } // GetShard is a utility method to get the shard using persistence layer -func (s *TestBase) GetShard(ctx context.Context, shardID int) (*p.ShardInfo, error) { - response, err := s.ShardMgr.GetShard(ctx, &p.GetShardRequest{ +func (s *TestBase) GetShard(ctx context.Context, shardID int) (*persistence.ShardInfo, error) { + response, err := s.ShardMgr.GetShard(ctx, &persistence.GetShardRequest{ ShardID: shardID, }) @@ -286,8 +285,8 @@ func (s *TestBase) GetShard(ctx context.Context, shardID int) (*p.ShardInfo, err } // UpdateShard is a utility method to update the shard using persistence layer -func (s *TestBase) UpdateShard(ctx context.Context, updatedInfo *p.ShardInfo, previousRangeID int64) error { - return s.ShardMgr.UpdateShard(ctx, &p.UpdateShardRequest{ +func (s *TestBase) UpdateShard(ctx context.Context, updatedInfo *persistence.ShardInfo, previousRangeID int64) error { + return s.ShardMgr.UpdateShard(ctx, &persistence.UpdateShardRequest{ ShardInfo: updatedInfo, PreviousRangeID: previousRangeID, }) @@ -307,17 +306,17 @@ func (s *TestBase) CreateWorkflowExecutionWithBranchToken( lastProcessedEventID int64, decisionScheduleID int64, branchToken []byte, - timerTasks []p.Task, -) (*p.CreateWorkflowExecutionResponse, error) { + timerTasks []persistence.Task, +) (*persistence.CreateWorkflowExecutionResponse, error) { now := time.Now() - versionHistory := p.NewVersionHistory(branchToken, []*p.VersionHistoryItem{ + versionHistory := persistence.NewVersionHistory(branchToken, []*persistence.VersionHistoryItem{ {decisionScheduleID, common.EmptyVersion}, }) - versionHistories := p.NewVersionHistories(versionHistory) - response, err := s.ExecutionManager.CreateWorkflowExecution(ctx, &p.CreateWorkflowExecutionRequest{ - NewWorkflowSnapshot: p.WorkflowSnapshot{ - ExecutionInfo: &p.WorkflowExecutionInfo{ + versionHistories := persistence.NewVersionHistories(versionHistory) + response, err := s.ExecutionManager.CreateWorkflowExecution(ctx, &persistence.CreateWorkflowExecutionRequest{ + NewWorkflowSnapshot: persistence.WorkflowSnapshot{ + ExecutionInfo: &persistence.WorkflowExecutionInfo{ CreateRequestID: uuid.New(), DomainID: domainID, WorkflowID: workflowExecution.GetWorkflowID(), @@ -327,8 +326,8 @@ func (s *TestBase) CreateWorkflowExecutionWithBranchToken( WorkflowTimeout: wTimeout, DecisionStartToCloseTimeout: decisionTimeout, ExecutionContext: executionContext, - State: p.WorkflowStateRunning, - CloseStatus: p.WorkflowCloseStatusNone, + State: persistence.WorkflowStateRunning, + CloseStatus: persistence.WorkflowCloseStatusNone, LastFirstEventID: common.FirstEventID, NextEventID: nextEventID, LastProcessedEvent: lastProcessedEventID, @@ -339,9 +338,9 @@ func (s *TestBase) CreateWorkflowExecutionWithBranchToken( DecisionTimeout: 1, BranchToken: branchToken, }, - ExecutionStats: &p.ExecutionStats{}, - TransferTasks: []p.Task{ - &p.DecisionTask{ + ExecutionStats: &persistence.ExecutionStats{}, + TransferTasks: []persistence.Task{ + &persistence.DecisionTask{ TaskID: s.GetNextSequenceNumber(), DomainID: domainID, TaskList: taskList, @@ -372,8 +371,8 @@ func (s *TestBase) CreateWorkflowExecution( nextEventID int64, lastProcessedEventID int64, decisionScheduleID int64, - timerTasks []p.Task, -) (*p.CreateWorkflowExecutionResponse, error) { + timerTasks []persistence.Task, +) (*persistence.CreateWorkflowExecutionResponse, error) { return s.CreateWorkflowExecutionWithBranchToken(ctx, domainID, workflowExecution, taskList, 
wType, wTimeout, decisionTimeout, executionContext, nextEventID, lastProcessedEventID, decisionScheduleID, nil, timerTasks) @@ -383,15 +382,15 @@ func (s *TestBase) CreateWorkflowExecution( func (s *TestBase) CreateChildWorkflowExecution(ctx context.Context, domainID string, workflowExecution types.WorkflowExecution, parentDomainID string, parentExecution types.WorkflowExecution, initiatedID int64, taskList, wType string, wTimeout int32, decisionTimeout int32, executionContext []byte, nextEventID int64, lastProcessedEventID int64, - decisionScheduleID int64, timerTasks []p.Task) (*p.CreateWorkflowExecutionResponse, error) { + decisionScheduleID int64, timerTasks []persistence.Task) (*persistence.CreateWorkflowExecutionResponse, error) { now := time.Now() - versionHistory := p.NewVersionHistory([]byte{}, []*p.VersionHistoryItem{ + versionHistory := persistence.NewVersionHistory([]byte{}, []*persistence.VersionHistoryItem{ {decisionScheduleID, common.EmptyVersion}, }) - versionHistories := p.NewVersionHistories(versionHistory) - response, err := s.ExecutionManager.CreateWorkflowExecution(ctx, &p.CreateWorkflowExecutionRequest{ - NewWorkflowSnapshot: p.WorkflowSnapshot{ - ExecutionInfo: &p.WorkflowExecutionInfo{ + versionHistories := persistence.NewVersionHistories(versionHistory) + response, err := s.ExecutionManager.CreateWorkflowExecution(ctx, &persistence.CreateWorkflowExecutionRequest{ + NewWorkflowSnapshot: persistence.WorkflowSnapshot{ + ExecutionInfo: &persistence.WorkflowExecutionInfo{ CreateRequestID: uuid.New(), DomainID: domainID, WorkflowID: workflowExecution.GetWorkflowID(), @@ -405,8 +404,8 @@ func (s *TestBase) CreateChildWorkflowExecution(ctx context.Context, domainID st WorkflowTimeout: wTimeout, DecisionStartToCloseTimeout: decisionTimeout, ExecutionContext: executionContext, - State: p.WorkflowStateCreated, - CloseStatus: p.WorkflowCloseStatusNone, + State: persistence.WorkflowStateCreated, + CloseStatus: persistence.WorkflowCloseStatusNone, LastFirstEventID: common.FirstEventID, NextEventID: nextEventID, LastProcessedEvent: lastProcessedEventID, @@ -416,9 +415,9 @@ func (s *TestBase) CreateChildWorkflowExecution(ctx context.Context, domainID st DecisionStartedID: common.EmptyEventID, DecisionTimeout: 1, }, - ExecutionStats: &p.ExecutionStats{}, - TransferTasks: []p.Task{ - &p.DecisionTask{ + ExecutionStats: &persistence.ExecutionStats{}, + TransferTasks: []persistence.Task{ + &persistence.DecisionTask{ TaskID: s.GetNextSequenceNumber(), DomainID: domainID, TaskList: taskList, @@ -436,8 +435,8 @@ func (s *TestBase) CreateChildWorkflowExecution(ctx context.Context, domainID st // GetWorkflowExecutionInfoWithStats is a utility method to retrieve execution info with size stats func (s *TestBase) GetWorkflowExecutionInfoWithStats(ctx context.Context, domainID string, workflowExecution types.WorkflowExecution) ( - *p.MutableStateStats, *p.WorkflowMutableState, error) { - response, err := s.ExecutionManager.GetWorkflowExecution(ctx, &p.GetWorkflowExecutionRequest{ + *persistence.MutableStateStats, *persistence.WorkflowMutableState, error) { + response, err := s.ExecutionManager.GetWorkflowExecution(ctx, &persistence.GetWorkflowExecutionRequest{ DomainID: domainID, Execution: workflowExecution, }) @@ -450,8 +449,8 @@ func (s *TestBase) GetWorkflowExecutionInfoWithStats(ctx context.Context, domain // GetWorkflowExecutionInfo is a utility method to retrieve execution info func (s *TestBase) GetWorkflowExecutionInfo(ctx context.Context, domainID string, workflowExecution 
types.WorkflowExecution) ( - *p.WorkflowMutableState, error) { - response, err := s.ExecutionManager.GetWorkflowExecution(ctx, &p.GetWorkflowExecutionRequest{ + *persistence.WorkflowMutableState, error) { + response, err := s.ExecutionManager.GetWorkflowExecution(ctx, &persistence.GetWorkflowExecutionRequest{ DomainID: domainID, Execution: workflowExecution, }) @@ -463,7 +462,7 @@ func (s *TestBase) GetWorkflowExecutionInfo(ctx context.Context, domainID string // GetCurrentWorkflowRunID returns the workflow run ID for the given params func (s *TestBase) GetCurrentWorkflowRunID(ctx context.Context, domainID, workflowID string) (string, error) { - response, err := s.ExecutionManager.GetCurrentExecution(ctx, &p.GetCurrentExecutionRequest{ + response, err := s.ExecutionManager.GetCurrentExecution(ctx, &persistence.GetCurrentExecutionRequest{ DomainID: domainID, WorkflowID: workflowID, }) @@ -478,8 +477,8 @@ func (s *TestBase) GetCurrentWorkflowRunID(ctx context.Context, domainID, workfl // ContinueAsNewExecution is a utility method to create workflow executions func (s *TestBase) ContinueAsNewExecution( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, condition int64, newExecution types.WorkflowExecution, nextEventID, decisionScheduleID int64, @@ -487,22 +486,22 @@ func (s *TestBase) ContinueAsNewExecution( ) error { now := time.Now() - newdecisionTask := &p.DecisionTask{ + newdecisionTask := &persistence.DecisionTask{ TaskID: s.GetNextSequenceNumber(), DomainID: updatedInfo.DomainID, TaskList: updatedInfo.TaskList, ScheduleID: int64(decisionScheduleID), } - versionHistory := p.NewVersionHistory([]byte{}, []*p.VersionHistoryItem{ + versionHistory := persistence.NewVersionHistory([]byte{}, []*persistence.VersionHistoryItem{ {decisionScheduleID, common.EmptyVersion}, }) - versionHistories := p.NewVersionHistories(versionHistory) + versionHistories := persistence.NewVersionHistories(versionHistory) - req := &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: p.WorkflowMutation{ + req := &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, - TransferTasks: []p.Task{newdecisionTask}, + TransferTasks: []persistence.Task{newdecisionTask}, TimerTasks: nil, Condition: condition, UpsertActivityInfos: nil, @@ -511,8 +510,8 @@ func (s *TestBase) ContinueAsNewExecution( DeleteTimerInfos: nil, VersionHistories: versionHistories, }, - NewWorkflowSnapshot: &p.WorkflowSnapshot{ - ExecutionInfo: &p.WorkflowExecutionInfo{ + NewWorkflowSnapshot: &persistence.WorkflowSnapshot{ + ExecutionInfo: &persistence.WorkflowExecutionInfo{ CreateRequestID: uuid.New(), DomainID: updatedInfo.DomainID, WorkflowID: newExecution.GetWorkflowID(), @@ -542,8 +541,8 @@ func (s *TestBase) ContinueAsNewExecution( RangeID: s.ShardInfo.RangeID, Encoding: pickRandomEncoding(), } - req.UpdateWorkflowMutation.ExecutionInfo.State = p.WorkflowStateCompleted - req.UpdateWorkflowMutation.ExecutionInfo.CloseStatus = p.WorkflowCloseStatusContinuedAsNew + req.UpdateWorkflowMutation.ExecutionInfo.State = persistence.WorkflowStateCompleted + req.UpdateWorkflowMutation.ExecutionInfo.CloseStatus = persistence.WorkflowCloseStatusContinuedAsNew _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, req) return err } @@ -551,16 +550,16 @@ func (s *TestBase) ContinueAsNewExecution( // 
UpdateWorkflowExecution is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecution( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, decisionScheduleIDs []int64, activityScheduleIDs []int64, condition int64, - timerTasks []p.Task, - upsertActivityInfos []*p.ActivityInfo, + timerTasks []persistence.Task, + upsertActivityInfos []*persistence.ActivityInfo, deleteActivityInfos []int64, - upsertTimerInfos []*p.TimerInfo, + upsertTimerInfos []*persistence.TimerInfo, deleteTimerInfos []string, ) error { return s.UpdateWorkflowExecutionWithRangeID( @@ -591,16 +590,16 @@ func (s *TestBase) UpdateWorkflowExecution( // UpdateWorkflowExecutionAndFinish is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionAndFinish( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, condition int64, - versionHistories *p.VersionHistories, + versionHistories *persistence.VersionHistories, ) error { - transferTasks := []p.Task{} - transferTasks = append(transferTasks, &p.CloseExecutionTask{TaskID: s.GetNextSequenceNumber()}) - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ + transferTasks := []persistence.Task{} + transferTasks = append(transferTasks, &persistence.CloseExecutionTask{TaskID: s.GetNextSequenceNumber()}) + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ RangeID: s.ShardInfo.RangeID, - UpdateWorkflowMutation: p.WorkflowMutation{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -620,11 +619,11 @@ func (s *TestBase) UpdateWorkflowExecutionAndFinish( // UpsertChildExecutionsState is a utility method to update mutable state of workflow execution func (s *TestBase) UpsertChildExecutionsState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, - upsertChildInfos []*p.ChildExecutionInfo, + upsertChildInfos []*persistence.ChildExecutionInfo, ) error { return s.UpdateWorkflowExecutionWithRangeID( @@ -655,11 +654,11 @@ func (s *TestBase) UpsertChildExecutionsState( // UpsertRequestCancelState is a utility method to update mutable state of workflow execution func (s *TestBase) UpsertRequestCancelState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, - upsertCancelInfos []*p.RequestCancelInfo, + upsertCancelInfos []*persistence.RequestCancelInfo, ) error { return s.UpdateWorkflowExecutionWithRangeID( @@ -690,11 +689,11 @@ func (s *TestBase) UpsertRequestCancelState( // UpsertSignalInfoState is a utility method to update mutable state of workflow execution func (s 
*TestBase) UpsertSignalInfoState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, - upsertSignalInfos []*p.SignalInfo, + upsertSignalInfos []*persistence.SignalInfo, ) error { return s.UpdateWorkflowExecutionWithRangeID( @@ -725,9 +724,9 @@ func (s *TestBase) UpsertSignalInfoState( // UpsertSignalsRequestedState is a utility method to update mutable state of workflow execution func (s *TestBase) UpsertSignalsRequestedState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, upsertSignalsRequested []string, ) error { @@ -759,9 +758,9 @@ func (s *TestBase) UpsertSignalsRequestedState( // DeleteChildExecutionsState is a utility method to delete child execution from mutable state func (s *TestBase) DeleteChildExecutionsState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, deleteChildInfo int64, ) error { @@ -793,9 +792,9 @@ func (s *TestBase) DeleteChildExecutionsState( // DeleteCancelState is a utility method to delete request cancel state from mutable state func (s *TestBase) DeleteCancelState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, deleteCancelInfo int64, ) error { @@ -827,9 +826,9 @@ func (s *TestBase) DeleteCancelState( // DeleteSignalState is a utility method to delete request cancel state from mutable state func (s *TestBase) DeleteSignalState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, deleteSignalInfo int64, ) error { @@ -861,9 +860,9 @@ func (s *TestBase) DeleteSignalState( // DeleteSignalsRequestedState is a utility method to delete mutable state of workflow execution func (s *TestBase) DeleteSignalsRequestedState( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, deleteSignalsRequestedIDs []string, ) error { @@ -895,11 +894,11 @@ func (s *TestBase) DeleteSignalsRequestedState( // UpdateWorklowStateAndReplication is a utility method to update workflow execution func (s *TestBase) UpdateWorklowStateAndReplication( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories 
*p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, condition int64, - txTasks []p.Task, + txTasks []persistence.Task, ) error { return s.UpdateWorkflowExecutionWithReplication( @@ -931,23 +930,23 @@ func (s *TestBase) UpdateWorklowStateAndReplication( // UpdateWorkflowExecutionWithRangeID is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionWithRangeID( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, decisionScheduleIDs []int64, activityScheduleIDs []int64, rangeID int64, condition int64, - timerTasks []p.Task, - upsertActivityInfos []*p.ActivityInfo, + timerTasks []persistence.Task, + upsertActivityInfos []*persistence.ActivityInfo, deleteActivityInfos []int64, - upsertTimerInfos []*p.TimerInfo, + upsertTimerInfos []*persistence.TimerInfo, deleteTimerInfos []string, - upsertChildInfos []*p.ChildExecutionInfo, + upsertChildInfos []*persistence.ChildExecutionInfo, deleteChildInfos []int64, - upsertCancelInfos []*p.RequestCancelInfo, + upsertCancelInfos []*persistence.RequestCancelInfo, deleteCancelInfos []int64, - upsertSignalInfos []*p.SignalInfo, + upsertSignalInfos []*persistence.SignalInfo, deleteSignalInfos []int64, upsertSignalRequestedIDs []string, deleteSignalRequestedIDs []string, @@ -962,7 +961,7 @@ func (s *TestBase) UpdateWorkflowExecutionWithRangeID( rangeID, condition, timerTasks, - []p.Task{}, + []persistence.Task{}, upsertActivityInfos, deleteActivityInfos, upsertTimerInfos, @@ -981,62 +980,62 @@ func (s *TestBase) UpdateWorkflowExecutionWithRangeID( // UpdateWorkflowExecutionWithReplication is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionWithReplication( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, - updatedVersionHistories *p.VersionHistories, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, + updatedVersionHistories *persistence.VersionHistories, decisionScheduleIDs []int64, activityScheduleIDs []int64, rangeID int64, condition int64, - timerTasks []p.Task, - txTasks []p.Task, - upsertActivityInfos []*p.ActivityInfo, + timerTasks []persistence.Task, + txTasks []persistence.Task, + upsertActivityInfos []*persistence.ActivityInfo, deleteActivityInfos []int64, - upsertTimerInfos []*p.TimerInfo, + upsertTimerInfos []*persistence.TimerInfo, deleteTimerInfos []string, - upsertChildInfos []*p.ChildExecutionInfo, + upsertChildInfos []*persistence.ChildExecutionInfo, deleteChildInfos []int64, - upsertCancelInfos []*p.RequestCancelInfo, + upsertCancelInfos []*persistence.RequestCancelInfo, deleteCancelInfos []int64, - upsertSignalInfos []*p.SignalInfo, + upsertSignalInfos []*persistence.SignalInfo, deleteSignalInfos []int64, upsertSignalRequestedIDs []string, deleteSignalRequestedIDs []string, ) error { // TODO: use separate fields for those three task types - var transferTasks []p.Task - var crossClusterTasks []p.Task - var replicationTasks []p.Task + var transferTasks []persistence.Task + var crossClusterTasks []persistence.Task + var replicationTasks []persistence.Task for _, task := range txTasks { switch t := task.(type) { - case 
*p.DecisionTask, - *p.ActivityTask, - *p.CloseExecutionTask, - *p.RecordWorkflowClosedTask, - *p.RecordChildExecutionCompletedTask, - *p.ApplyParentClosePolicyTask, - *p.CancelExecutionTask, - *p.StartChildExecutionTask, - *p.SignalExecutionTask, - *p.RecordWorkflowStartedTask, - *p.ResetWorkflowTask, - *p.UpsertWorkflowSearchAttributesTask: + case *persistence.DecisionTask, + *persistence.ActivityTask, + *persistence.CloseExecutionTask, + *persistence.RecordWorkflowClosedTask, + *persistence.RecordChildExecutionCompletedTask, + *persistence.ApplyParentClosePolicyTask, + *persistence.CancelExecutionTask, + *persistence.StartChildExecutionTask, + *persistence.SignalExecutionTask, + *persistence.RecordWorkflowStartedTask, + *persistence.ResetWorkflowTask, + *persistence.UpsertWorkflowSearchAttributesTask: transferTasks = append(transferTasks, t) - case *p.CrossClusterStartChildExecutionTask, - *p.CrossClusterCancelExecutionTask, - *p.CrossClusterSignalExecutionTask, - *p.CrossClusterRecordChildExecutionCompletedTask, - *p.CrossClusterApplyParentClosePolicyTask: + case *persistence.CrossClusterStartChildExecutionTask, + *persistence.CrossClusterCancelExecutionTask, + *persistence.CrossClusterSignalExecutionTask, + *persistence.CrossClusterRecordChildExecutionCompletedTask, + *persistence.CrossClusterApplyParentClosePolicyTask: crossClusterTasks = append(crossClusterTasks, t) - case *p.HistoryReplicationTask, *p.SyncActivityTask: + case *persistence.HistoryReplicationTask, *persistence.SyncActivityTask: replicationTasks = append(replicationTasks, t) default: panic(fmt.Sprintf("Unknown transfer task type. %v", t)) } } for _, decisionScheduleID := range decisionScheduleIDs { - transferTasks = append(transferTasks, &p.DecisionTask{ + transferTasks = append(transferTasks, &persistence.DecisionTask{ TaskID: s.GetNextSequenceNumber(), DomainID: updatedInfo.DomainID, TaskList: updatedInfo.TaskList, @@ -1044,15 +1043,15 @@ func (s *TestBase) UpdateWorkflowExecutionWithReplication( } for _, activityScheduleID := range activityScheduleIDs { - transferTasks = append(transferTasks, &p.ActivityTask{ + transferTasks = append(transferTasks, &persistence.ActivityTask{ TaskID: s.GetNextSequenceNumber(), DomainID: updatedInfo.DomainID, TaskList: updatedInfo.TaskList, ScheduleID: int64(activityScheduleID)}) } - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ RangeID: rangeID, - UpdateWorkflowMutation: p.WorkflowMutation{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, VersionHistories: updatedVersionHistories, @@ -1087,16 +1086,16 @@ func (s *TestBase) UpdateWorkflowExecutionWithReplication( // with IgnoreCurrent update mode. 
func (s *TestBase) UpdateWorkflowExecutionTasks( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, condition int64, - transferTasks []p.Task, - timerTasks []p.Task, - crossClusterTasks []p.Task, + transferTasks []persistence.Task, + timerTasks []persistence.Task, + crossClusterTasks []persistence.Task, ) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ Mode: persistence.UpdateWorkflowModeIgnoreCurrent, - UpdateWorkflowMutation: p.WorkflowMutation{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -1113,16 +1112,16 @@ func (s *TestBase) UpdateWorkflowExecutionTasks( // UpdateWorkflowExecutionWithTransferTasks is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionWithTransferTasks( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, condition int64, - transferTasks []p.Task, - upsertActivityInfo []*p.ActivityInfo, - versionHistories *p.VersionHistories, + transferTasks []persistence.Task, + upsertActivityInfo []*persistence.ActivityInfo, + versionHistories *persistence.VersionHistories, ) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: p.WorkflowMutation{ + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -1139,9 +1138,9 @@ func (s *TestBase) UpdateWorkflowExecutionWithTransferTasks( // UpdateWorkflowExecutionForChildExecutionsInitiated is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionForChildExecutionsInitiated( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, updatedStats *p.ExecutionStats, condition int64, transferTasks []p.Task, childInfos []*p.ChildExecutionInfo) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: p.WorkflowMutation{ + updatedInfo *persistence.WorkflowExecutionInfo, updatedStats *persistence.ExecutionStats, condition int64, transferTasks []persistence.Task, childInfos []*persistence.ChildExecutionInfo) error { + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -1157,10 +1156,10 @@ func (s *TestBase) UpdateWorkflowExecutionForChildExecutionsInitiated( // UpdateWorkflowExecutionForRequestCancel is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionForRequestCancel( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, updatedStats *p.ExecutionStats, condition int64, transferTasks []p.Task, - upsertRequestCancelInfo []*p.RequestCancelInfo) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: 
p.WorkflowMutation{ + updatedInfo *persistence.WorkflowExecutionInfo, updatedStats *persistence.ExecutionStats, condition int64, transferTasks []persistence.Task, + upsertRequestCancelInfo []*persistence.RequestCancelInfo) error { + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -1176,10 +1175,10 @@ func (s *TestBase) UpdateWorkflowExecutionForRequestCancel( // UpdateWorkflowExecutionForSignal is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionForSignal( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, updatedStats *p.ExecutionStats, condition int64, transferTasks []p.Task, - upsertSignalInfos []*p.SignalInfo) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: p.WorkflowMutation{ + updatedInfo *persistence.WorkflowExecutionInfo, updatedStats *persistence.ExecutionStats, condition int64, transferTasks []persistence.Task, + upsertSignalInfos []*persistence.SignalInfo) error { + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, TransferTasks: transferTasks, @@ -1195,16 +1194,16 @@ func (s *TestBase) UpdateWorkflowExecutionForSignal( // UpdateWorkflowExecutionForBufferEvents is a utility method to update workflow execution func (s *TestBase) UpdateWorkflowExecutionForBufferEvents( ctx context.Context, - updatedInfo *p.WorkflowExecutionInfo, - updatedStats *p.ExecutionStats, + updatedInfo *persistence.WorkflowExecutionInfo, + updatedStats *persistence.ExecutionStats, condition int64, bufferEvents []*types.HistoryEvent, clearBufferedEvents bool, - versionHistories *p.VersionHistories, + versionHistories *persistence.VersionHistories, ) error { - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ - UpdateWorkflowMutation: p.WorkflowMutation{ + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedInfo, ExecutionStats: updatedStats, NewBufferedEvents: bufferEvents, @@ -1219,28 +1218,28 @@ func (s *TestBase) UpdateWorkflowExecutionForBufferEvents( } // UpdateAllMutableState is a utility method to update workflow execution -func (s *TestBase) UpdateAllMutableState(ctx context.Context, updatedMutableState *p.WorkflowMutableState, condition int64) error { - var aInfos []*p.ActivityInfo +func (s *TestBase) UpdateAllMutableState(ctx context.Context, updatedMutableState *persistence.WorkflowMutableState, condition int64) error { + var aInfos []*persistence.ActivityInfo for _, ai := range updatedMutableState.ActivityInfos { aInfos = append(aInfos, ai) } - var tInfos []*p.TimerInfo + var tInfos []*persistence.TimerInfo for _, ti := range updatedMutableState.TimerInfos { tInfos = append(tInfos, ti) } - var cInfos []*p.ChildExecutionInfo + var cInfos []*persistence.ChildExecutionInfo for _, ci := range updatedMutableState.ChildExecutionInfos { cInfos = append(cInfos, ci) } - var rcInfos []*p.RequestCancelInfo + var rcInfos []*persistence.RequestCancelInfo for _, rci := range updatedMutableState.RequestCancelInfos { rcInfos = append(rcInfos, rci) } - 
var sInfos []*p.SignalInfo + var sInfos []*persistence.SignalInfo for _, si := range updatedMutableState.SignalInfos { sInfos = append(sInfos, si) } @@ -1249,9 +1248,9 @@ func (s *TestBase) UpdateAllMutableState(ctx context.Context, updatedMutableStat for id := range updatedMutableState.SignalRequestedIDs { srIDs = append(srIDs, id) } - _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &p.UpdateWorkflowExecutionRequest{ + _, err := s.ExecutionManager.UpdateWorkflowExecution(ctx, &persistence.UpdateWorkflowExecutionRequest{ RangeID: s.ShardInfo.RangeID, - UpdateWorkflowMutation: p.WorkflowMutation{ + UpdateWorkflowMutation: persistence.WorkflowMutation{ ExecutionInfo: updatedMutableState.ExecutionInfo, ExecutionStats: updatedMutableState.ExecutionStats, Condition: condition, @@ -1271,21 +1270,21 @@ func (s *TestBase) UpdateAllMutableState(ctx context.Context, updatedMutableStat // ConflictResolveWorkflowExecution is utility method to reset mutable state func (s *TestBase) ConflictResolveWorkflowExecution( ctx context.Context, - info *p.WorkflowExecutionInfo, - stats *p.ExecutionStats, + info *persistence.WorkflowExecutionInfo, + stats *persistence.ExecutionStats, nextEventID int64, - activityInfos []*p.ActivityInfo, - timerInfos []*p.TimerInfo, - childExecutionInfos []*p.ChildExecutionInfo, - requestCancelInfos []*p.RequestCancelInfo, - signalInfos []*p.SignalInfo, + activityInfos []*persistence.ActivityInfo, + timerInfos []*persistence.TimerInfo, + childExecutionInfos []*persistence.ChildExecutionInfo, + requestCancelInfos []*persistence.RequestCancelInfo, + signalInfos []*persistence.SignalInfo, ids []string, - versionHistories *p.VersionHistories, + versionHistories *persistence.VersionHistories, ) error { - _, err := s.ExecutionManager.ConflictResolveWorkflowExecution(ctx, &p.ConflictResolveWorkflowExecutionRequest{ + _, err := s.ExecutionManager.ConflictResolveWorkflowExecution(ctx, &persistence.ConflictResolveWorkflowExecutionRequest{ RangeID: s.ShardInfo.RangeID, - ResetWorkflowSnapshot: p.WorkflowSnapshot{ + ResetWorkflowSnapshot: persistence.WorkflowSnapshot{ ExecutionInfo: info, ExecutionStats: stats, Condition: nextEventID, @@ -1304,8 +1303,8 @@ func (s *TestBase) ConflictResolveWorkflowExecution( } // DeleteWorkflowExecution is a utility method to delete a workflow execution -func (s *TestBase) DeleteWorkflowExecution(ctx context.Context, info *p.WorkflowExecutionInfo) error { - return s.ExecutionManager.DeleteWorkflowExecution(ctx, &p.DeleteWorkflowExecutionRequest{ +func (s *TestBase) DeleteWorkflowExecution(ctx context.Context, info *persistence.WorkflowExecutionInfo) error { + return s.ExecutionManager.DeleteWorkflowExecution(ctx, &persistence.DeleteWorkflowExecutionRequest{ DomainID: info.DomainID, WorkflowID: info.WorkflowID, RunID: info.RunID, @@ -1313,8 +1312,8 @@ func (s *TestBase) DeleteWorkflowExecution(ctx context.Context, info *p.Workflow } // DeleteCurrentWorkflowExecution is a utility method to delete the workflow current execution -func (s *TestBase) DeleteCurrentWorkflowExecution(ctx context.Context, info *p.WorkflowExecutionInfo) error { - return s.ExecutionManager.DeleteCurrentWorkflowExecution(ctx, &p.DeleteCurrentWorkflowExecutionRequest{ +func (s *TestBase) DeleteCurrentWorkflowExecution(ctx context.Context, info *persistence.WorkflowExecutionInfo) error { + return s.ExecutionManager.DeleteCurrentWorkflowExecution(ctx, &persistence.DeleteCurrentWorkflowExecutionRequest{ DomainID: info.DomainID, WorkflowID: info.WorkflowID, RunID: info.RunID, @@ 
-1322,13 +1321,13 @@ func (s *TestBase) DeleteCurrentWorkflowExecution(ctx context.Context, info *p.W } // GetTransferTasks is a utility method to get tasks from transfer task queue -func (s *TestBase) GetTransferTasks(ctx context.Context, batchSize int, getAll bool) ([]*p.TransferTaskInfo, error) { - result := []*p.TransferTaskInfo{} +func (s *TestBase) GetTransferTasks(ctx context.Context, batchSize int, getAll bool) ([]*persistence.TransferTaskInfo, error) { + result := []*persistence.TransferTaskInfo{} var token []byte Loop: for { - response, err := s.ExecutionManager.GetTransferTasks(ctx, &p.GetTransferTasksRequest{ + response, err := s.ExecutionManager.GetTransferTasks(ctx, &persistence.GetTransferTasksRequest{ ReadLevel: 0, MaxReadLevel: math.MaxInt64, BatchSize: batchSize, @@ -1349,12 +1348,12 @@ Loop: } // GetCrossClusterTasks is a utility method to get tasks from transfer task queue -func (s *TestBase) GetCrossClusterTasks(ctx context.Context, targetCluster string, readLevel int64, batchSize int, getAll bool) ([]*p.CrossClusterTaskInfo, error) { - result := []*p.CrossClusterTaskInfo{} +func (s *TestBase) GetCrossClusterTasks(ctx context.Context, targetCluster string, readLevel int64, batchSize int, getAll bool) ([]*persistence.CrossClusterTaskInfo, error) { + result := []*persistence.CrossClusterTaskInfo{} var token []byte for { - response, err := s.ExecutionManager.GetCrossClusterTasks(ctx, &p.GetCrossClusterTasksRequest{ + response, err := s.ExecutionManager.GetCrossClusterTasks(ctx, &persistence.GetCrossClusterTasksRequest{ TargetCluster: targetCluster, ReadLevel: readLevel, MaxReadLevel: int64(math.MaxInt64), @@ -1376,13 +1375,13 @@ func (s *TestBase) GetCrossClusterTasks(ctx context.Context, targetCluster strin } // GetReplicationTasks is a utility method to get tasks from replication task queue -func (s *TestBase) GetReplicationTasks(ctx context.Context, batchSize int, getAll bool) ([]*p.ReplicationTaskInfo, error) { - result := []*p.ReplicationTaskInfo{} +func (s *TestBase) GetReplicationTasks(ctx context.Context, batchSize int, getAll bool) ([]*persistence.ReplicationTaskInfo, error) { + result := []*persistence.ReplicationTaskInfo{} var token []byte Loop: for { - response, err := s.ExecutionManager.GetReplicationTasks(ctx, &p.GetReplicationTasksRequest{ + response, err := s.ExecutionManager.GetReplicationTasks(ctx, &persistence.GetReplicationTasksRequest{ ReadLevel: 0, MaxReadLevel: math.MaxInt64, BatchSize: batchSize, @@ -1405,14 +1404,14 @@ Loop: // RangeCompleteReplicationTask is a utility method to complete a range of replication tasks func (s *TestBase) RangeCompleteReplicationTask(ctx context.Context, inclusiveEndTaskID int64) error { for { - resp, err := s.ExecutionManager.RangeCompleteReplicationTask(ctx, &p.RangeCompleteReplicationTaskRequest{ + resp, err := s.ExecutionManager.RangeCompleteReplicationTask(ctx, &persistence.RangeCompleteReplicationTaskRequest{ InclusiveEndTaskID: inclusiveEndTaskID, PageSize: 1, }) if err != nil { return err } - if !p.HasMoreRowsToDelete(resp.TasksCompleted, 1) { + if !persistence.HasMoreRowsToDelete(resp.TasksCompleted, 1) { break } } @@ -1423,10 +1422,10 @@ func (s *TestBase) RangeCompleteReplicationTask(ctx context.Context, inclusiveEn func (s *TestBase) PutReplicationTaskToDLQ( ctx context.Context, sourceCluster string, - taskInfo *p.ReplicationTaskInfo, + taskInfo *persistence.ReplicationTaskInfo, ) error { - return s.ExecutionManager.PutReplicationTaskToDLQ(ctx, &p.PutReplicationTaskToDLQRequest{ + return 
s.ExecutionManager.PutReplicationTaskToDLQ(ctx, &persistence.PutReplicationTaskToDLQRequest{ SourceClusterName: sourceCluster, TaskInfo: taskInfo, }) @@ -1440,11 +1439,11 @@ func (s *TestBase) GetReplicationTasksFromDLQ( maxReadLevel int64, pageSize int, pageToken []byte, -) (*p.GetReplicationTasksFromDLQResponse, error) { +) (*persistence.GetReplicationTasksFromDLQResponse, error) { - return s.ExecutionManager.GetReplicationTasksFromDLQ(ctx, &p.GetReplicationTasksFromDLQRequest{ + return s.ExecutionManager.GetReplicationTasksFromDLQ(ctx, &persistence.GetReplicationTasksFromDLQRequest{ SourceClusterName: sourceCluster, - GetReplicationTasksRequest: p.GetReplicationTasksRequest{ + GetReplicationTasksRequest: persistence.GetReplicationTasksRequest{ ReadLevel: readLevel, MaxReadLevel: maxReadLevel, BatchSize: pageSize, @@ -1457,9 +1456,9 @@ func (s *TestBase) GetReplicationTasksFromDLQ( func (s *TestBase) GetReplicationDLQSize( ctx context.Context, sourceCluster string, -) (*p.GetReplicationDLQSizeResponse, error) { +) (*persistence.GetReplicationDLQSizeResponse, error) { - return s.ExecutionManager.GetReplicationDLQSize(ctx, &p.GetReplicationDLQSizeRequest{ + return s.ExecutionManager.GetReplicationDLQSize(ctx, &persistence.GetReplicationDLQSizeRequest{ SourceClusterName: sourceCluster, }) } @@ -1471,7 +1470,7 @@ func (s *TestBase) DeleteReplicationTaskFromDLQ( taskID int64, ) error { - return s.ExecutionManager.DeleteReplicationTaskFromDLQ(ctx, &p.DeleteReplicationTaskFromDLQRequest{ + return s.ExecutionManager.DeleteReplicationTaskFromDLQ(ctx, &persistence.DeleteReplicationTaskFromDLQRequest{ SourceClusterName: sourceCluster, TaskID: taskID, }) @@ -1485,7 +1484,7 @@ func (s *TestBase) RangeDeleteReplicationTaskFromDLQ( endTaskID int64, ) error { - _, err := s.ExecutionManager.RangeDeleteReplicationTaskFromDLQ(ctx, &p.RangeDeleteReplicationTaskFromDLQRequest{ + _, err := s.ExecutionManager.RangeDeleteReplicationTaskFromDLQ(ctx, &persistence.RangeDeleteReplicationTaskFromDLQRequest{ SourceClusterName: sourceCluster, ExclusiveBeginTaskID: beginTaskID, InclusiveEndTaskID: endTaskID, @@ -1496,10 +1495,10 @@ func (s *TestBase) RangeDeleteReplicationTaskFromDLQ( // CreateFailoverMarkers is a utility method to create failover markers func (s *TestBase) CreateFailoverMarkers( ctx context.Context, - markers []*p.FailoverMarkerTask, + markers []*persistence.FailoverMarkerTask, ) error { - return s.ExecutionManager.CreateFailoverMarkerTasks(ctx, &p.CreateFailoverMarkersRequest{ + return s.ExecutionManager.CreateFailoverMarkerTasks(ctx, &persistence.CreateFailoverMarkersRequest{ RangeID: s.ShardInfo.RangeID, Markers: markers, }) @@ -1508,7 +1507,7 @@ func (s *TestBase) CreateFailoverMarkers( // CompleteTransferTask is a utility method to complete a transfer task func (s *TestBase) CompleteTransferTask(ctx context.Context, taskID int64) error { - return s.ExecutionManager.CompleteTransferTask(ctx, &p.CompleteTransferTaskRequest{ + return s.ExecutionManager.CompleteTransferTask(ctx, &persistence.CompleteTransferTaskRequest{ TaskID: taskID, }) } @@ -1516,7 +1515,7 @@ func (s *TestBase) CompleteTransferTask(ctx context.Context, taskID int64) error // RangeCompleteTransferTask is a utility method to complete a range of transfer tasks func (s *TestBase) RangeCompleteTransferTask(ctx context.Context, exclusiveBeginTaskID int64, inclusiveEndTaskID int64) error { for { - resp, err := s.ExecutionManager.RangeCompleteTransferTask(ctx, &p.RangeCompleteTransferTaskRequest{ + resp, err := 
s.ExecutionManager.RangeCompleteTransferTask(ctx, &persistence.RangeCompleteTransferTaskRequest{ ExclusiveBeginTaskID: exclusiveBeginTaskID, InclusiveEndTaskID: inclusiveEndTaskID, PageSize: 1, @@ -1524,7 +1523,7 @@ func (s *TestBase) RangeCompleteTransferTask(ctx context.Context, exclusiveBegin if err != nil { return err } - if !p.HasMoreRowsToDelete(resp.TasksCompleted, 1) { + if !persistence.HasMoreRowsToDelete(resp.TasksCompleted, 1) { break } } @@ -1533,7 +1532,7 @@ func (s *TestBase) RangeCompleteTransferTask(ctx context.Context, exclusiveBegin // CompleteCrossClusterTask is a utility method to complete a cross-cluster task func (s *TestBase) CompleteCrossClusterTask(ctx context.Context, targetCluster string, taskID int64) error { - return s.ExecutionManager.CompleteCrossClusterTask(ctx, &p.CompleteCrossClusterTaskRequest{ + return s.ExecutionManager.CompleteCrossClusterTask(ctx, &persistence.CompleteCrossClusterTaskRequest{ TargetCluster: targetCluster, TaskID: taskID, }) @@ -1542,7 +1541,7 @@ func (s *TestBase) CompleteCrossClusterTask(ctx context.Context, targetCluster s // RangeCompleteCrossClusterTask is a utility method to complete a range of cross-cluster tasks func (s *TestBase) RangeCompleteCrossClusterTask(ctx context.Context, targetCluster string, exclusiveBeginTaskID int64, inclusiveEndTaskID int64) error { for { - resp, err := s.ExecutionManager.RangeCompleteCrossClusterTask(ctx, &p.RangeCompleteCrossClusterTaskRequest{ + resp, err := s.ExecutionManager.RangeCompleteCrossClusterTask(ctx, &persistence.RangeCompleteCrossClusterTaskRequest{ TargetCluster: targetCluster, ExclusiveBeginTaskID: exclusiveBeginTaskID, InclusiveEndTaskID: inclusiveEndTaskID, @@ -1551,7 +1550,7 @@ func (s *TestBase) RangeCompleteCrossClusterTask(ctx context.Context, targetClus if err != nil { return err } - if !p.HasMoreRowsToDelete(resp.TasksCompleted, 1) { + if !persistence.HasMoreRowsToDelete(resp.TasksCompleted, 1) { break } } @@ -1561,19 +1560,19 @@ func (s *TestBase) RangeCompleteCrossClusterTask(ctx context.Context, targetClus // CompleteReplicationTask is a utility method to complete a replication task func (s *TestBase) CompleteReplicationTask(ctx context.Context, taskID int64) error { - return s.ExecutionManager.CompleteReplicationTask(ctx, &p.CompleteReplicationTaskRequest{ + return s.ExecutionManager.CompleteReplicationTask(ctx, &persistence.CompleteReplicationTaskRequest{ TaskID: taskID, }) } // GetTimerIndexTasks is a utility method to get tasks from transfer task queue -func (s *TestBase) GetTimerIndexTasks(ctx context.Context, batchSize int, getAll bool) ([]*p.TimerTaskInfo, error) { - result := []*p.TimerTaskInfo{} +func (s *TestBase) GetTimerIndexTasks(ctx context.Context, batchSize int, getAll bool) ([]*persistence.TimerTaskInfo, error) { + result := []*persistence.TimerTaskInfo{} var token []byte Loop: for { - response, err := s.ExecutionManager.GetTimerIndexTasks(ctx, &p.GetTimerIndexTasksRequest{ + response, err := s.ExecutionManager.GetTimerIndexTasks(ctx, &persistence.GetTimerIndexTasksRequest{ MinTimestamp: time.Time{}, MaxTimestamp: time.Unix(0, math.MaxInt64), BatchSize: batchSize, @@ -1595,7 +1594,7 @@ Loop: // CompleteTimerTask is a utility method to complete a timer task func (s *TestBase) CompleteTimerTask(ctx context.Context, ts time.Time, taskID int64) error { - return s.ExecutionManager.CompleteTimerTask(ctx, &p.CompleteTimerTaskRequest{ + return s.ExecutionManager.CompleteTimerTask(ctx, &persistence.CompleteTimerTaskRequest{ VisibilityTimestamp: ts, TaskID: 
taskID, }) @@ -1604,7 +1603,7 @@ func (s *TestBase) CompleteTimerTask(ctx context.Context, ts time.Time, taskID i // RangeCompleteTimerTask is a utility method to complete a range of timer tasks func (s *TestBase) RangeCompleteTimerTask(ctx context.Context, inclusiveBeginTimestamp time.Time, exclusiveEndTimestamp time.Time) error { for { - resp, err := s.ExecutionManager.RangeCompleteTimerTask(ctx, &p.RangeCompleteTimerTaskRequest{ + resp, err := s.ExecutionManager.RangeCompleteTimerTask(ctx, &persistence.RangeCompleteTimerTaskRequest{ InclusiveBeginTimestamp: inclusiveBeginTimestamp, ExclusiveEndTimestamp: exclusiveEndTimestamp, PageSize: 1, @@ -1612,7 +1611,7 @@ func (s *TestBase) RangeCompleteTimerTask(ctx context.Context, inclusiveBeginTim if err != nil { return err } - if !p.HasMoreRowsToDelete(resp.TasksCompleted, 1) { + if !persistence.HasMoreRowsToDelete(resp.TasksCompleted, 1) { break } } @@ -1622,10 +1621,10 @@ func (s *TestBase) RangeCompleteTimerTask(ctx context.Context, inclusiveBeginTim // CreateDecisionTask is a utility method to create a task func (s *TestBase) CreateDecisionTask(ctx context.Context, domainID string, workflowExecution types.WorkflowExecution, taskList string, decisionScheduleID int64) (int64, error) { - leaseResponse, err := s.TaskMgr.LeaseTaskList(ctx, &p.LeaseTaskListRequest{ + leaseResponse, err := s.TaskMgr.LeaseTaskList(ctx, &persistence.LeaseTaskListRequest{ DomainID: domainID, TaskList: taskList, - TaskType: p.TaskListTypeDecision, + TaskType: persistence.TaskListTypeDecision, }) if err != nil { return 0, err @@ -1635,11 +1634,11 @@ func (s *TestBase) CreateDecisionTask(ctx context.Context, domainID string, work leaseResponse.TaskListInfo.LastUpdated = time.Time{} taskID := s.GetNextSequenceNumber() - tasks := []*p.CreateTaskInfo{ + tasks := []*persistence.CreateTaskInfo{ { TaskID: taskID, Execution: workflowExecution, - Data: &p.TaskInfo{ + Data: &persistence.TaskInfo{ DomainID: domainID, WorkflowID: workflowExecution.WorkflowID, RunID: workflowExecution.RunID, @@ -1649,7 +1648,7 @@ func (s *TestBase) CreateDecisionTask(ctx context.Context, domainID string, work }, } - _, err = s.TaskMgr.CreateTasks(ctx, &p.CreateTasksRequest{ + _, err = s.TaskMgr.CreateTasks(ctx, &persistence.CreateTasksRequest{ TaskListInfo: leaseResponse.TaskListInfo, Tasks: tasks, }) @@ -1665,13 +1664,13 @@ func (s *TestBase) CreateDecisionTask(ctx context.Context, domainID string, work func (s *TestBase) CreateActivityTasks(ctx context.Context, domainID string, workflowExecution types.WorkflowExecution, activities map[int64]string) ([]int64, error) { - taskLists := make(map[string]*p.TaskListInfo) + taskLists := make(map[string]*persistence.TaskListInfo) for _, tl := range activities { _, ok := taskLists[tl] if !ok { resp, err := s.TaskMgr.LeaseTaskList( ctx, - &p.LeaseTaskListRequest{DomainID: domainID, TaskList: tl, TaskType: p.TaskListTypeActivity}) + &persistence.LeaseTaskListRequest{DomainID: domainID, TaskList: tl, TaskType: persistence.TaskListTypeActivity}) if err != nil { return []int64{}, err } @@ -1684,11 +1683,11 @@ func (s *TestBase) CreateActivityTasks(ctx context.Context, domainID string, wor var taskIDs []int64 for activityScheduleID, taskList := range activities { taskID := s.GetNextSequenceNumber() - tasks := []*p.CreateTaskInfo{ + tasks := []*persistence.CreateTaskInfo{ { TaskID: taskID, Execution: workflowExecution, - Data: &p.TaskInfo{ + Data: &persistence.TaskInfo{ DomainID: domainID, WorkflowID: workflowExecution.WorkflowID, RunID: 
workflowExecution.RunID, @@ -1698,7 +1697,7 @@ func (s *TestBase) CreateActivityTasks(ctx context.Context, domainID string, wor }, }, } - _, err := s.TaskMgr.CreateTasks(ctx, &p.CreateTasksRequest{ + _, err := s.TaskMgr.CreateTasks(ctx, &persistence.CreateTasksRequest{ TaskListInfo: taskLists[taskList], Tasks: tasks, }) @@ -1712,8 +1711,8 @@ func (s *TestBase) CreateActivityTasks(ctx context.Context, domainID string, wor } // GetTasks is a utility method to get tasks from persistence -func (s *TestBase) GetTasks(ctx context.Context, domainID, taskList string, taskType int, batchSize int) (*p.GetTasksResponse, error) { - response, err := s.TaskMgr.GetTasks(ctx, &p.GetTasksRequest{ +func (s *TestBase) GetTasks(ctx context.Context, domainID, taskList string, taskType int, batchSize int) (*persistence.GetTasksResponse, error) { + response, err := s.TaskMgr.GetTasks(ctx, &persistence.GetTasksRequest{ DomainID: domainID, TaskList: taskList, TaskType: taskType, @@ -1725,13 +1724,13 @@ func (s *TestBase) GetTasks(ctx context.Context, domainID, taskList string, task return nil, err } - return &p.GetTasksResponse{Tasks: response.Tasks}, nil + return &persistence.GetTasksResponse{Tasks: response.Tasks}, nil } // CompleteTask is a utility method to complete a task func (s *TestBase) CompleteTask(ctx context.Context, domainID, taskList string, taskType int, taskID int64, ackLevel int64) error { - return s.TaskMgr.CompleteTask(ctx, &p.CompleteTaskRequest{ - TaskList: &p.TaskListInfo{ + return s.TaskMgr.CompleteTask(ctx, &persistence.CompleteTaskRequest{ + TaskList: &persistence.TaskListInfo{ DomainID: domainID, AckLevel: ackLevel, TaskType: taskType, @@ -1847,7 +1846,7 @@ func (s *TestBase) Publish( } func isMessageIDConflictError(err error) bool { - _, ok := err.(*p.ConditionFailedError) + _, ok := err.(*persistence.ConditionFailedError) return ok } @@ -1856,7 +1855,7 @@ func (s *TestBase) GetReplicationMessages( ctx context.Context, lastMessageID int64, maxCount int, -) ([]*p.QueueMessage, error) { +) ([]*persistence.QueueMessage, error) { return s.DomainReplicationQueueMgr.ReadMessages(ctx, lastMessageID, maxCount) } @@ -1906,7 +1905,7 @@ func (s *TestBase) GetMessagesFromDomainDLQ( lastMessageID int64, pageSize int, pageToken []byte, -) ([]*p.QueueMessage, []byte, error) { +) ([]*persistence.QueueMessage, []byte, error) { return s.DomainReplicationQueueMgr.ReadMessagesFromDLQ( ctx, diff --git a/common/persistence/persistenceMetricClients.go b/common/persistence/persistenceMetricClients.go index 037aa587160..a4d86f58e6b 100644 --- a/common/persistence/persistenceMetricClients.go +++ b/common/persistence/persistenceMetricClients.go @@ -258,7 +258,7 @@ func (p *persistenceMetricsClientBase) call(scope int, op func() error) error { p.metricClient.IncCounter(scope, metrics.PersistenceRequests) before := time.Now() err := op() - duration := time.Now().Sub(before) + duration := time.Since(before) p.metricClient.RecordTimer(scope, metrics.PersistenceLatency, duration) if p.enableLatencyHistogramMetrics { p.metricClient.RecordHistogramDuration(scope, metrics.PersistenceLatencyHistogram, duration) diff --git a/common/persistence/sql/common.go b/common/persistence/sql/common.go index b6992366c93..942df4c0d33 100644 --- a/common/persistence/sql/common.go +++ b/common/persistence/sql/common.go @@ -27,11 +27,10 @@ import ( "encoding/gob" "fmt" - "github.com/uber/cadence/common/persistence/serialization" - "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/tag" 
"github.com/uber/cadence/common/persistence" + "github.com/uber/cadence/common/persistence/serialization" "github.com/uber/cadence/common/persistence/sql/sqlplugin" "github.com/uber/cadence/common/types" ) @@ -105,7 +104,7 @@ func serializePageToken(offset int64) []byte { func deserializePageToken(payload []byte) (int64, error) { if len(payload) != 8 { - return 0, fmt.Errorf("Invalid token of %v length", len(payload)) + return 0, fmt.Errorf("invalid token of %v length", len(payload)) } return int64(binary.LittleEndian.Uint64(payload)), nil } diff --git a/common/persistence/sql/sqlDomainStore.go b/common/persistence/sql/sqlDomainStore.go index e1210638acd..5c165ea07c6 100644 --- a/common/persistence/sql/sqlDomainStore.go +++ b/common/persistence/sql/sqlDomainStore.go @@ -69,7 +69,7 @@ func updateMetadata(ctx context.Context, tx sqlplugin.Tx, oldNotificationVersion } } else if rowsAffected != 1 { return &types.InternalServiceError{ - Message: fmt.Sprintf("Failed to update domain metadata. <>1 rows affected."), + Message: "Failed to update domain metadata. <>1 rows affected.", } } diff --git a/common/persistence/sql/sqlExecutionStore.go b/common/persistence/sql/sqlExecutionStore.go index dff5761ffe6..15d0d1cf547 100644 --- a/common/persistence/sql/sqlExecutionStore.go +++ b/common/persistence/sql/sqlExecutionStore.go @@ -428,7 +428,7 @@ func (m *sqlExecutionStore) updateWorkflowExecutionTx( if !bytes.Equal(domainID, newDomainID) { return &types.InternalServiceError{ - Message: fmt.Sprintf("UpdateWorkflowExecution: cannot continue as new to another domain"), + Message: "UpdateWorkflowExecution: cannot continue as new to another domain", } } @@ -1460,7 +1460,7 @@ func (m *sqlExecutionStore) populateWorkflowMutableState( BackoffCoefficient: info.GetRetryBackoffCoefficient(), MaximumInterval: info.GetRetryMaximumInterval(), MaximumAttempts: info.GetRetryMaximumAttempts(), - ExpirationSeconds: info.GetRetryExpiration(), + ExpirationInterval: info.GetRetryExpiration(), ExpirationTime: info.GetRetryExpirationTimestamp(), BranchToken: info.GetEventBranchToken(), ExecutionContext: info.GetExecutionContext(), diff --git a/common/persistence/sql/sqlExecutionStoreUtil.go b/common/persistence/sql/sqlExecutionStoreUtil.go index 5f12631ded8..cbf4a138529 100644 --- a/common/persistence/sql/sqlExecutionStoreUtil.go +++ b/common/persistence/sql/sqlExecutionStoreUtil.go @@ -1224,54 +1224,6 @@ func assertRunIDAndUpdateCurrentExecution( return updateCurrentExecution(ctx, tx, shardID, domainID, workflowID, newRunID, createRequestID, state, closeStatus, startVersion, lastWriteVersion) } -func assertAndUpdateCurrentExecution( - ctx context.Context, - tx sqlplugin.Tx, - shardID int, - domainID serialization.UUID, - workflowID string, - newRunID serialization.UUID, - previousRunID serialization.UUID, - previousLastWriteVersion int64, - previousState int, - createRequestID string, - state int, - closeStatus int, - startVersion int64, - lastWriteVersion int64, -) error { - - assertFn := func(currentRow *sqlplugin.CurrentExecutionsRow) error { - if !bytes.Equal(currentRow.RunID, previousRunID) { - return &p.ConditionFailedError{Msg: fmt.Sprintf( - "assertAndUpdateCurrentExecution failed. Current run ID was %v, expected %v", - currentRow.RunID, - previousRunID, - )} - } - if currentRow.LastWriteVersion != previousLastWriteVersion { - return &p.ConditionFailedError{Msg: fmt.Sprintf( - "assertAndUpdateCurrentExecution failed. 
Current last write version was %v, expected %v", - currentRow.LastWriteVersion, - previousLastWriteVersion, - )} - } - if currentRow.State != previousState { - return &p.ConditionFailedError{Msg: fmt.Sprintf( - "assertAndUpdateCurrentExecution failed. Current state %v, expected %v", - currentRow.State, - previousState, - )} - } - return nil - } - if err := assertCurrentExecution(ctx, tx, shardID, domainID, workflowID, assertFn); err != nil { - return err - } - - return updateCurrentExecution(ctx, tx, shardID, domainID, workflowID, newRunID, createRequestID, state, closeStatus, startVersion, lastWriteVersion) -} - func assertCurrentExecution( ctx context.Context, tx sqlplugin.Tx, @@ -1393,7 +1345,7 @@ func buildExecutionRow( RetryBackoffCoefficient: executionInfo.BackoffCoefficient, RetryMaximumInterval: executionInfo.MaximumInterval, RetryMaximumAttempts: executionInfo.MaximumAttempts, - RetryExpiration: executionInfo.ExpirationSeconds, + RetryExpiration: executionInfo.ExpirationInterval, RetryExpirationTimestamp: executionInfo.ExpirationTime, RetryNonRetryableErrors: executionInfo.NonRetriableErrors, EventStoreVersion: p.EventStoreVersion, diff --git a/common/persistence/sql/sqlHistoryStore.go b/common/persistence/sql/sqlHistoryStore.go index 80352541167..719c496a65c 100644 --- a/common/persistence/sql/sqlHistoryStore.go +++ b/common/persistence/sql/sqlHistoryStore.go @@ -33,7 +33,6 @@ import ( "github.com/uber/cadence/common" "github.com/uber/cadence/common/log" - p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/persistence/sql/sqlplugin" ) @@ -56,7 +55,7 @@ func NewHistoryV2Persistence( db sqlplugin.DB, logger log.Logger, parser serialization.Parser, -) (p.HistoryStore, error) { +) (persistence.HistoryStore, error) { return &sqlHistoryStore{ sqlStore: sqlStore{ @@ -70,15 +69,15 @@ func NewHistoryV2Persistence( // AppendHistoryNodes add(or override) a node to a history branch func (m *sqlHistoryStore) AppendHistoryNodes( ctx context.Context, - request *p.InternalAppendHistoryNodesRequest, + request *persistence.InternalAppendHistoryNodesRequest, ) error { branchInfo := request.BranchInfo beginNodeID := persistenceutils.GetBeginNodeID(branchInfo) if request.NodeID < beginNodeID { - return &p.InvalidPersistenceRequestError{ - Msg: fmt.Sprintf("cannot append to ancestors' nodes"), + return &persistence.InvalidPersistenceRequestError{ + Msg: "cannot append to ancestors' nodes", } } @@ -94,9 +93,7 @@ func (m *sqlHistoryStore) AppendHistoryNodes( if request.IsNewBranch { var ancestors []*types.HistoryBranchRange - for _, anc := range branchInfo.Ancestors { - ancestors = append(ancestors, anc) - } + ancestors = append(ancestors, branchInfo.Ancestors...) 
treeInfo := &serialization.HistoryTreeInfo{ Ancestors: ancestors, @@ -149,7 +146,7 @@ func (m *sqlHistoryStore) AppendHistoryNodes( _, err := m.db.InsertIntoHistoryNode(ctx, nodeRow) if err != nil { if m.db.IsDupEntryError(err) { - return &p.ConditionFailedError{Msg: fmt.Sprintf("AppendHistoryNodes: row already exist: %v", err)} + return &persistence.ConditionFailedError{Msg: fmt.Sprintf("AppendHistoryNodes: row already exist: %v", err)} } return convertCommonErrors(m.db, "AppendHistoryEvents", "", err) } @@ -159,8 +156,8 @@ func (m *sqlHistoryStore) AppendHistoryNodes( // ReadHistoryBranch returns history node data for a branch func (m *sqlHistoryStore) ReadHistoryBranch( ctx context.Context, - request *p.InternalReadHistoryBranchRequest, -) (*p.InternalReadHistoryBranchResponse, error) { + request *persistence.InternalReadHistoryBranchRequest, +) (*persistence.InternalReadHistoryBranchResponse, error) { minNodeID := request.MinNodeID maxNodeID := request.MaxNodeID @@ -191,14 +188,14 @@ func (m *sqlHistoryStore) ReadHistoryBranch( rows, err := m.db.SelectFromHistoryNode(ctx, filter) if err == sql.ErrNoRows || (err == nil && len(rows) == 0) { - return &p.InternalReadHistoryBranchResponse{}, nil + return &persistence.InternalReadHistoryBranchResponse{}, nil } if err != nil { return nil, convertCommonErrors(m.db, "ReadHistoryBranch", "", err) } - history := make([]*p.DataBlob, 0, int(request.PageSize)) - eventBlob := &p.DataBlob{} + history := make([]*persistence.DataBlob, 0, int(request.PageSize)) + eventBlob := &persistence.DataBlob{} for _, row := range rows { eventBlob.Data = row.Data @@ -217,7 +214,7 @@ func (m *sqlHistoryStore) ReadHistoryBranch( // -> batch with higher transaction ID is valid if row.NodeID < lastNodeID { return nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted data, nodeID cannot decrease"), + Message: "corrupted data, nodeID cannot decrease", } } else if row.NodeID > lastNodeID { // update lastNodeID so that our pagination can make progress in the corner case that @@ -231,18 +228,18 @@ func (m *sqlHistoryStore) ReadHistoryBranch( switch { case row.NodeID < lastNodeID: return nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted data, nodeID cannot decrease"), + Message: "corrupted data, nodeID cannot decrease", } case row.NodeID == lastNodeID: return nil, &types.InternalDataInconsistencyError{ - Message: fmt.Sprintf("corrupted data, same nodeID must have smaller txnID"), + Message: "corrupted data, same nodeID must have smaller txnID", } default: // row.NodeID > lastNodeID: // NOTE: when row.nodeID > lastNodeID, we expect the one with largest txnID comes first lastTxnID = *row.TxnID lastNodeID = row.NodeID history = append(history, eventBlob) - eventBlob = &p.DataBlob{} + eventBlob = &persistence.DataBlob{} } } @@ -251,7 +248,7 @@ func (m *sqlHistoryStore) ReadHistoryBranch( pagingToken = serializePageToken(lastNodeID) } - return &p.InternalReadHistoryBranchResponse{ + return &persistence.InternalReadHistoryBranchResponse{ History: history, NextPageToken: pagingToken, LastNodeID: lastNodeID, @@ -305,8 +302,8 @@ func (m *sqlHistoryStore) ReadHistoryBranch( // func (m *sqlHistoryStore) ForkHistoryBranch( ctx context.Context, - request *p.InternalForkHistoryBranchRequest, -) (*p.InternalForkHistoryBranchResponse, error) { + request *persistence.InternalForkHistoryBranchRequest, +) (*persistence.InternalForkHistoryBranchResponse, error) { forkB := request.ForkBranchInfo treeID := forkB.TreeID @@ -337,7 +334,7 @@ func (m 
*sqlHistoryStore) ForkHistoryBranch( }) } - resp := &p.InternalForkHistoryBranchResponse{ + resp := &persistence.InternalForkHistoryBranchResponse{ NewBranchInfo: types.HistoryBranch{ TreeID: treeID, BranchID: request.NewBranchID, @@ -379,7 +376,7 @@ func (m *sqlHistoryStore) ForkHistoryBranch( // DeleteHistoryBranch removes a branch func (m *sqlHistoryStore) DeleteHistoryBranch( ctx context.Context, - request *p.InternalDeleteHistoryBranchRequest, + request *persistence.InternalDeleteHistoryBranchRequest, ) error { branch := request.BranchInfo @@ -391,7 +388,7 @@ func (m *sqlHistoryStore) DeleteHistoryBranch( BeginNodeID: beginNodeID, }) - rsp, err := m.GetHistoryTree(ctx, &p.InternalGetHistoryTreeRequest{ + rsp, err := m.GetHistoryTree(ctx, &persistence.InternalGetHistoryTreeRequest{ TreeID: treeID, ShardID: common.IntPtr(request.ShardID), }) @@ -462,8 +459,8 @@ func (m *sqlHistoryStore) DeleteHistoryBranch( // TODO: Limit the underlying query to a specific shard at a time. See https://github.com/uber/cadence/issues/4064 func (m *sqlHistoryStore) GetAllHistoryTreeBranches( ctx context.Context, - request *p.GetAllHistoryTreeBranchesRequest, -) (*p.GetAllHistoryTreeBranchesResponse, error) { + request *persistence.GetAllHistoryTreeBranchesRequest, +) (*persistence.GetAllHistoryTreeBranchesResponse, error) { page := historyTreePageToken{} if request.NextPageToken != nil { if err := gobDeserialize(request.NextPageToken, &page); err != nil { @@ -484,13 +481,13 @@ func (m *sqlHistoryStore) GetAllHistoryTreeBranches( } rows, err := m.db.GetAllHistoryTreeBranches(ctx, &filter) if err == sql.ErrNoRows || (err == nil && len(rows) == 0) { - return &p.GetAllHistoryTreeBranchesResponse{}, nil + return &persistence.GetAllHistoryTreeBranchesResponse{}, nil } if err != nil { return nil, convertCommonErrors(m.db, "GetAllHistoryTreeBranches", "", err) } - resp := &p.GetAllHistoryTreeBranchesResponse{} - resp.Branches = make([]p.HistoryBranchDetail, len(rows)) + resp := &persistence.GetAllHistoryTreeBranchesResponse{} + resp.Branches = make([]persistence.HistoryBranchDetail, len(rows)) for i, row := range rows { treeInfo, err := m.parser.HistoryTreeInfoFromBlob(row.Data, row.DataEncoding) if err != nil { @@ -509,6 +506,10 @@ func (m *sqlHistoryStore) GetAllHistoryTreeBranches( TreeID: lastRow.TreeID, BranchID: lastRow.BranchID, }) + if err != nil { + return nil, &types.InternalServiceError{Message: fmt.Sprintf("error serializing nextPageToken:%v", err)} + } + } // TODO: this is broken for multi-sharding: the shardID should increase if there are less rows than request pageSize, // until loop over all shards @@ -518,8 +519,8 @@ func (m *sqlHistoryStore) GetAllHistoryTreeBranches( // GetHistoryTree returns all branch information of a tree func (m *sqlHistoryStore) GetHistoryTree( ctx context.Context, - request *p.InternalGetHistoryTreeRequest, -) (*p.InternalGetHistoryTreeResponse, error) { + request *persistence.InternalGetHistoryTreeRequest, +) (*persistence.InternalGetHistoryTreeResponse, error) { treeID := serialization.MustParseUUID(request.TreeID) branches := make([]*types.HistoryBranch, 0) @@ -530,7 +531,7 @@ func (m *sqlHistoryStore) GetHistoryTree( } rows, err := m.db.SelectFromHistoryTree(ctx, treeFilter) if err == sql.ErrNoRows || (err == nil && len(rows) == 0) { - return &p.InternalGetHistoryTreeResponse{}, nil + return &persistence.InternalGetHistoryTreeResponse{}, nil } if err != nil { return nil, convertCommonErrors(m.db, "GetHistoryTree", "", err) @@ -548,7 +549,7 @@ func (m *sqlHistoryStore) 
GetHistoryTree( branches = append(branches, br) } - return &p.InternalGetHistoryTreeResponse{ + return &persistence.InternalGetHistoryTreeResponse{ Branches: branches, }, nil } diff --git a/common/persistence/sql/sqlQueueStore.go b/common/persistence/sql/sqlQueueStore.go index 7f478280fe3..884653fdfcf 100644 --- a/common/persistence/sql/sqlQueueStore.go +++ b/common/persistence/sql/sqlQueueStore.go @@ -31,10 +31,6 @@ import ( "github.com/uber/cadence/common/types" ) -const ( - emptyMessageID = -1 -) - type ( sqlQueueStore struct { queueType persistence.QueueType @@ -178,7 +174,7 @@ func (q *sqlQueueStore) ReadMessagesFromDLQ( pageToken []byte, ) ([]*persistence.InternalQueueMessage, []byte, error) { - if pageToken != nil && len(pageToken) != 0 { + if len(pageToken) != 0 { lastReadMessageID, err := deserializePageToken(pageToken) if err != nil { return nil, nil, &types.InternalServiceError{ diff --git a/common/persistence/sql/sqldriver/connections.go b/common/persistence/sql/sqldriver/connections.go index 6128ed125f8..0e3415052e6 100644 --- a/common/persistence/sql/sqldriver/connections.go +++ b/common/persistence/sql/sqldriver/connections.go @@ -54,7 +54,7 @@ func CreateDBConnections(cfg *config.SQL, createConnFunc CreateSingleDBConn) ([] cfg.ConnectAddr = "" }() - xdbs := make([]*sqlx.DB, cfg.NumShards, cfg.NumShards) + xdbs := make([]*sqlx.DB, cfg.NumShards) for idx, entry := range cfg.MultipleDatabasesConfig { cfg.User = entry.User cfg.Password = entry.Password diff --git a/common/persistence/sql/sqldriver/sharded.go b/common/persistence/sql/sqldriver/sharded.go index 079d663c462..fd451840c7a 100644 --- a/common/persistence/sql/sqldriver/sharded.go +++ b/common/persistence/sql/sqldriver/sharded.go @@ -39,8 +39,6 @@ type ( useTx bool // if tx is not nil, the methods from commonOfDbAndTx should use tx currTxShardID int // which shard is current tx started from } - - shardedSqlExecResult struct{} ) // newShardedSQLDriver returns a driver querying a group of SQL databases as sharded solution. 
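The two cleanups just above lean on basic Go slice semantics: the len of a nil slice (including a nil []byte page token) is 0, so the extra nil check is redundant, and make([]T, n) already gives the slice a capacity of n, so repeating the capacity adds nothing. A minimal standalone sketch of both properties (illustrative only, not part of the diff):

package main

import "fmt"

func main() {
	var pageToken []byte             // nil slice, as when no paging token was supplied
	fmt.Println(len(pageToken) != 0) // false: len of a nil slice is 0, no separate nil check needed

	xdbs := make([]*int, 4)          // equivalent to make([]*int, 4, 4)
	fmt.Println(len(xdbs), cap(xdbs)) // 4 4
}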
diff --git a/common/persistence/sql/sqlplugin/mysql/db.go b/common/persistence/sql/sqlplugin/mysql/db.go index 343651a8d46..e5aeb59a332 100644 --- a/common/persistence/sql/sqlplugin/mysql/db.go +++ b/common/persistence/sql/sqlplugin/mysql/db.go @@ -58,10 +58,7 @@ func (mdb *db) IsDupEntryError(err error) bool { } func (mdb *db) IsNotFoundError(err error) bool { - if err == sql.ErrNoRows { - return true - } - return false + return err == sql.ErrNoRows } func (mdb *db) IsTimeoutError(err error) bool { diff --git a/common/persistence/sql/sqlplugin/postgres/db.go b/common/persistence/sql/sqlplugin/postgres/db.go index 6dcff8af656..2389145be68 100644 --- a/common/persistence/sql/sqlplugin/postgres/db.go +++ b/common/persistence/sql/sqlplugin/postgres/db.go @@ -61,17 +61,11 @@ func (pdb *db) IsDupEntryError(err error) bool { } func (pdb *db) IsNotFoundError(err error) bool { - if err == sql.ErrNoRows { - return true - } - return false + return err == sql.ErrNoRows } func (pdb *db) IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - return false + return err == context.DeadlineExceeded } func (pdb *db) IsThrottlingError(err error) bool { diff --git a/common/persistence/visibilitySingleManager.go b/common/persistence/visibilitySingleManager.go index d31d8480e27..479a087c725 100644 --- a/common/persistence/visibilitySingleManager.go +++ b/common/persistence/visibilitySingleManager.go @@ -101,7 +101,7 @@ func (v *visibilityManagerImpl) RecordWorkflowExecutionClosed( CloseTimestamp: time.Unix(0, request.CloseTimestamp), Status: request.Status, HistoryLength: request.HistoryLength, - RetentionSeconds: common.SecondsToDuration(request.RetentionSeconds), + RetentionPeriod: common.SecondsToDuration(request.RetentionSeconds), IsCron: request.IsCron, NumClusters: request.NumClusters, } @@ -385,20 +385,6 @@ func (v *visibilityManagerImpl) convertVisibilityWorkflowExecutionInfo(execution return convertedExecution } -func (v *visibilityManagerImpl) fromInternalListWorkflowExecutionsRequest(internalReq *InternalListWorkflowExecutionsRequest) *ListWorkflowExecutionsRequest { - if internalReq == nil { - return nil - } - return &ListWorkflowExecutionsRequest{ - DomainUUID: internalReq.DomainUUID, - Domain: internalReq.Domain, - EarliestTime: internalReq.EarliestTime.UnixNano(), - LatestTime: internalReq.LatestTime.UnixNano(), - PageSize: internalReq.PageSize, - NextPageToken: internalReq.NextPageToken, - } -} - func (v *visibilityManagerImpl) toInternalListWorkflowExecutionsRequest(req *ListWorkflowExecutionsRequest) *InternalListWorkflowExecutionsRequest { if req == nil { return nil diff --git a/common/reconciliation/fetcher/concrete.go b/common/reconciliation/fetcher/concrete.go index f905d72d49e..f94288c0740 100644 --- a/common/reconciliation/fetcher/concrete.go +++ b/common/reconciliation/fetcher/concrete.go @@ -62,6 +62,9 @@ func ConcreteExecution( } branchToken, branch, err := getBranchToken(e.State.ExecutionInfo.BranchToken, e.State.VersionHistories, codec.NewThriftRWEncoder()) + if err != nil { + return nil, err + } return &entity.ConcreteExecution{ BranchToken: branchToken, @@ -93,7 +96,7 @@ func getConcreteExecutions( if err != nil { return pagination.Page{}, err } - executions := make([]pagination.Entity, len(resp.Executions), len(resp.Executions)) + executions := make([]pagination.Entity, len(resp.Executions)) for i, e := range resp.Executions { branchToken, branch, err := getBranchToken(e.ExecutionInfo.BranchToken, e.VersionHistories, encoder) if err != nil { 
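IsNotFoundError in both SQL drivers now returns the comparison result directly instead of branching to an explicit true/false; the behavior is unchanged. A minimal sketch of the same shape, using a hypothetical standalone helper rather than the driver types:

package main

import (
	"database/sql"
	"fmt"
)

// isNotFound mirrors the simplified driver methods: return the boolean expression itself.
func isNotFound(err error) bool {
	return err == sql.ErrNoRows
}

func main() {
	fmt.Println(isNotFound(sql.ErrNoRows)) // true
	fmt.Println(isNotFound(nil))           // false
	// A wrapped error would not match ==; errors.Is(err, sql.ErrNoRows) covers that case,
	// but the stores in this diff compare against the unwrapped sentinel in the same way.
}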
diff --git a/common/reconciliation/fetcher/current.go b/common/reconciliation/fetcher/current.go index 6e59a0800dd..dbc49d0e635 100644 --- a/common/reconciliation/fetcher/current.go +++ b/common/reconciliation/fetcher/current.go @@ -81,7 +81,7 @@ func getCurrentExecution( if err != nil { return pagination.Page{}, err } - executions := make([]pagination.Entity, len(resp.Executions), len(resp.Executions)) + executions := make([]pagination.Entity, len(resp.Executions)) for i, e := range resp.Executions { currentExec := &entity.CurrentExecution{ CurrentRunID: e.CurrentRunID, diff --git a/common/reconciliation/invariant/invariantManager_test.go b/common/reconciliation/invariant/invariantManager_test.go index 4bd8da876de..ded01c88cbd 100644 --- a/common/reconciliation/invariant/invariantManager_test.go +++ b/common/reconciliation/invariant/invariantManager_test.go @@ -227,7 +227,7 @@ func (s *InvariantManagerSuite) TestRunChecks() { } for _, tc := range testCases { - invariants := make([]Invariant, len(tc.checkResults), len(tc.checkResults)) + invariants := make([]Invariant, len(tc.checkResults)) for i := 0; i < len(tc.checkResults); i++ { mockInvariant := NewMockInvariant(s.controller) mockInvariant.EXPECT().Check(gomock.Any(), gomock.Any()).Return(tc.checkResults[i]) @@ -636,7 +636,7 @@ func (s *InvariantManagerSuite) TestRunFixes() { } for _, tc := range testCases { - invariants := make([]Invariant, len(tc.fixResults), len(tc.fixResults)) + invariants := make([]Invariant, len(tc.fixResults)) for i := 0; i < len(tc.fixResults); i++ { mockInvariant := NewMockInvariant(s.controller) mockInvariant.EXPECT().Fix(gomock.Any(), gomock.Any()).Return(tc.fixResults[i]) diff --git a/common/rpc/middleware.go b/common/rpc/middleware.go index a0b548eb4c0..42a0f58c2d9 100644 --- a/common/rpc/middleware.go +++ b/common/rpc/middleware.go @@ -51,7 +51,9 @@ func (m *authOutboundMiddleware) Call(ctx context.Context, request *transport.Re return out.Call(ctx, request) } -const _responseInfoContextKey = "response-info" +type contextKey string + +const _responseInfoContextKey = contextKey("response-info") // ContextWithResponseInfo will create a child context that has ResponseInfo set as value. // This value will get filled after the call is made and can be used later to retrieve some info of interest. 
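The middleware change above replaces a bare string context key with an unexported contextKey type. context.Value matches keys by dynamic type as well as value, so a typed key cannot collide with a plain string key of the same text defined in another package. A minimal sketch of the pattern, with illustrative names rather than the ones in common/rpc:

package main

import (
	"context"
	"fmt"
)

type ctxKey string

const responseInfoKey = ctxKey("response-info")

func main() {
	ctx := context.WithValue(context.Background(), responseInfoKey, "set via typed key")

	fmt.Println(ctx.Value("response-info")) // <nil>: a plain string key does not match the typed key
	fmt.Println(ctx.Value(responseInfoKey)) // set via typed key
}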
diff --git a/common/testing/event_generator.go b/common/testing/event_generator.go index 40286bda3fc..7716a1100f2 100644 --- a/common/testing/event_generator.go +++ b/common/testing/event_generator.go @@ -73,13 +73,6 @@ type ( condition func(...interface{}) bool action func() } - - // RevokeFunc is the condition inside edge - // The function used to check if the edge is accessible at a certain state - RevokeFunc struct { - methodName string //nolint - input []interface{} //nolint - } ) // NewEventGenerator initials the event generator @@ -356,12 +349,6 @@ func (g *EventGenerator) pickRandomVertex( return endVertex.DeepCopy() } -func (g *EventGenerator) shouldBumpVersion() bool { - // 1//1000 to bump the version - //return g.dice.Intn(1000) == 500 - return false -} - // NewHistoryEventEdge initials a new edge between two HistoryEventVertexes func NewHistoryEventEdge( start Vertex, diff --git a/common/util.go b/common/util.go index 368c6957f84..0e802d770c4 100644 --- a/common/util.go +++ b/common/util.go @@ -158,7 +158,7 @@ func CreatePersistenceRetryPolicyWithContext(ctx context.Context) backoff.RetryP policy := backoff.NewExponentialRetryPolicy(retryPersistenceOperationInitialInterval) policy.SetMaximumInterval(retryPersistenceOperationMaxInterval) - policy.SetExpirationInterval(deadline.Sub(time.Now())) + policy.SetExpirationInterval(time.Until(deadline)) return policy } @@ -876,15 +876,15 @@ func ConvertDynamicConfigMapPropertyToIntMap( } var intValue int - switch value.(type) { + switch value := value.(type) { case float64: - intValue = int(value.(float64)) + intValue = int(value) case int: - intValue = value.(int) + intValue = value case int32: - intValue = int(value.(int32)) + intValue = int(value) case int64: - intValue = int(value.(int64)) + intValue = int(value) default: return nil, fmt.Errorf("unknown value %v with type %T", value, value) } diff --git a/host/activity_test.go b/host/activity_test.go index 0b080fed8f1..678432ee3c1 100644 --- a/host/activity_test.go +++ b/host/activity_test.go @@ -646,6 +646,7 @@ func (s *IntegrationSuite) TestActivityHeartBeatWorkflow_Timeout() { s.True(err == nil || err == matching.ErrNoTasks) err = poller.PollAndProcessActivityTask(false) + s.Error(err) s.Logger.Info("Waiting for workflow to complete", tag.WorkflowRunID(we.RunID)) diff --git a/host/integration_test.go b/host/integration_test.go index 66f8cb61707..a4866e870f8 100644 --- a/host/integration_test.go +++ b/host/integration_test.go @@ -1249,7 +1249,7 @@ func (s *IntegrationSuite) TestCronWorkflow() { // Make sure the cron workflow start running at a proper time, in this case 3 seconds after the // startWorkflowExecution request - backoffDuration := time.Now().Sub(startWorkflowTS) + backoffDuration := time.Since(startWorkflowTS) s.True(backoffDuration > targetBackoffDuration) s.True(backoffDuration < targetBackoffDuration+backoffDurationTolerance) @@ -1613,12 +1613,12 @@ func (s *IntegrationSuite) TestRateLimitBufferedEvents() { // Buffered Signals for i := 0; i < 100; i++ { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, i) + binary.Write(buf, binary.LittleEndian, int64(i)) s.Nil(s.sendSignal(s.domainName, workflowExecution, "SignalName", buf.Bytes(), identity)) } buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 101) + binary.Write(buf, binary.LittleEndian, int64(101)) signalErr := s.sendSignal(s.domainName, workflowExecution, "SignalName", buf.Bytes(), identity) s.Nil(signalErr) @@ -2404,7 +2404,7 @@ func (s *IntegrationSuite) 
TestCronChildWorkflowExecution() { s.Logger.Info("PollAndProcessDecisionTask", tag.Error(err)) s.Nil(err) - backoffDuration := time.Now().Sub(startChildWorkflowTS) + backoffDuration := time.Since(startChildWorkflowTS) s.True(backoffDuration < targetBackoffDuration+backoffDurationTolerance) startChildWorkflowTS = time.Now() } @@ -3256,7 +3256,7 @@ func (s *IntegrationSuite) TestTaskProcessingProtectionForRateLimitError() { // Send one signal to create a new decision buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 0) + binary.Write(buf, binary.LittleEndian, int64(0)) s.Nil(s.sendSignal(s.domainName, workflowExecution, "SignalName", buf.Bytes(), identity)) // Drop decision to cause all events to be buffered from now on @@ -3267,13 +3267,13 @@ func (s *IntegrationSuite) TestTaskProcessingProtectionForRateLimitError() { // Buffered 100 Signals for i := 1; i < 101; i++ { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, i) + binary.Write(buf, binary.LittleEndian, int64(i)) s.Nil(s.sendSignal(s.domainName, workflowExecution, "SignalName", buf.Bytes(), identity)) } // 101 signal, which will fail the decision buf = new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, 101) + binary.Write(buf, binary.LittleEndian, int64(101)) signalErr := s.sendSignal(s.domainName, workflowExecution, "SignalName", buf.Bytes(), identity) s.Nil(signalErr) @@ -3392,6 +3392,7 @@ func (s *IntegrationSuite) TestStickyTimeout_NonTransientDecision() { Identity: identity, RequestID: uuid.New(), }) + s.NoError(err) // Wait for decision timeout stickyTimeout := false @@ -3443,6 +3444,7 @@ WaitForStickyTimeoutLoop: // Complete workflow execution _, err = poller.PollAndProcessDecisionTaskWithAttempt(true, false, false, true, int64(2)) + s.NoError(err) // Assert for single decision task failed and workflow completion failedDecisions := 0 @@ -3554,6 +3556,7 @@ func (s *IntegrationSuite) TestStickyTasklistResetThenTimeout() { Identity: identity, RequestID: uuid.New(), }) + s.NoError(err) //Reset sticky tasklist before sticky decision task starts s.engine.ResetStickyTaskList(createContext(), &types.ResetStickyTaskListRequest{ @@ -3615,6 +3618,7 @@ WaitForStickyTimeoutLoop: // Complete workflow execution _, err = poller.PollAndProcessDecisionTaskWithAttempt(true, false, false, true, int64(2)) + s.NoError(err) // Assert for single decision task failed and workflow completion failedDecisions := 0 diff --git a/host/reset_workflow_test.go b/host/reset_workflow_test.go index 775fd1fea7a..a933bb492dd 100644 --- a/host/reset_workflow_test.go +++ b/host/reset_workflow_test.go @@ -404,7 +404,7 @@ func (s *IntegrationSuite) TestResetWorkflow_NoDecisionTaskCompleted() { s.Logger.Info("PollAndProcessWorkflowTask", tag.Error(err)) s.NoError(err) - events = s.getHistory(s.domainName, &types.WorkflowExecution{ + s.getHistory(s.domainName, &types.WorkflowExecution{ WorkflowID: id, RunID: newRunID, }) diff --git a/host/signal_workflow_test.go b/host/signal_workflow_test.go index fb8cf4f2092..193faae25aa 100644 --- a/host/signal_workflow_test.go +++ b/host/signal_workflow_test.go @@ -637,7 +637,7 @@ func (s *IntegrationSuite) TestSignalWorkflow_Cron_NoDecisionTaskCreated() { var decisionTaskDelay time.Duration dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - decisionTaskDelay = time.Now().Sub(now) + decisionTaskDelay = time.Since(now) return nil, []*types.Decision{{ 
DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), diff --git a/host/taskpoller.go b/host/taskpoller.go index 290fad46756..09caf4e1c3b 100644 --- a/host/taskpoller.go +++ b/host/taskpoller.go @@ -160,7 +160,7 @@ Loop: } events = history.Events - if events == nil || len(events) == 0 { + if len(events) == 0 { p.Logger.Fatal("History Events are empty") } @@ -293,7 +293,7 @@ func (p *TaskPoller) HandlePartialDecision(response *types.PollForDecisionTaskRe } events = history.Events - if events == nil || len(events) == 0 { + if len(events) == 0 { p.Logger.Fatal("History Events are empty") } diff --git a/host/xdc/integration_failover_test.go b/host/xdc/integration_failover_test.go deleted file mode 100644 index 2605c0d8585..00000000000 --- a/host/xdc/integration_failover_test.go +++ /dev/null @@ -1,2054 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:build !race -// +build !race - -// need to run xdc tests with race detector off because of ringpop bug causing data race issue - -package xdc - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "flag" - "io/ioutil" - "os" - "strconv" - "testing" - "time" - - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.uber.org/zap" - "gopkg.in/yaml.v2" - - "github.com/uber/cadence/client/frontend" - "github.com/uber/cadence/common" - "github.com/uber/cadence/common/cache" - "github.com/uber/cadence/common/log" - "github.com/uber/cadence/common/log/loggerimpl" - "github.com/uber/cadence/common/log/tag" - "github.com/uber/cadence/common/types" - "github.com/uber/cadence/environment" - "github.com/uber/cadence/host" -) - -type ( - integrationClustersTestSuite struct { - // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, - // not merely log an error - *require.Assertions - suite.Suite - cluster1 *host.TestCluster - cluster2 *host.TestCluster - logger log.Logger - } -) - -const ( - cacheRefreshInterval = cache.DomainCacheRefreshInterval + 5*time.Second -) - -var ( - clusterName = []string{"active", "standby"} - clusterReplicationConfig = []*types.ClusterReplicationConfiguration{ - { - ClusterName: clusterName[0], - }, - { - ClusterName: clusterName[1], - }, - } -) - -func createContext() context.Context { - ctx, _ := context.WithTimeout(context.Background(), 90*time.Second) - return ctx -} - -func TestIntegrationClustersTestSuite(t *testing.T) { - flag.Parse() - // TODO: Suite is disabled since it was intented to be deprecated. - // Until we have a certain decision, disabling the suite run - // suite.Run(t, new(integrationClustersTestSuite)) -} - -func (s *integrationClustersTestSuite) SetupSuite() { - zapLogger, err := zap.NewDevelopment() - // cannot use s.Nil since it is not initialized - s.Require().NoError(err) - s.logger = loggerimpl.NewLogger(zapLogger) - - fileName := "../testdata/xdc_integration_test_clusters.yaml" - if host.TestFlags.TestClusterConfigFile != "" { - fileName = host.TestFlags.TestClusterConfigFile - } - environment.SetupEnv() - - confContent, err := ioutil.ReadFile(fileName) - s.Require().NoError(err) - confContent = []byte(os.ExpandEnv(string(confContent))) - - var clusterConfigs []*host.TestClusterConfig - s.Require().NoError(yaml.Unmarshal(confContent, &clusterConfigs)) - - /* - // TODO: following lines are failing build; it was introduced after integration tests refactor. - // Looks like this test is deprecated, decide if we want to delete the whole test. - // Commenting the build-failing parts until we have a decision - c, err := host.NewCluster(clusterConfigs[0], s.logger.WithTags(tag.ClusterName(clusterName[0]))) - s.Require().NoError(err) - s.cluster1 = c - - c, err = host.NewCluster(clusterConfigs[1], s.logger.WithTags(tag.ClusterName(clusterName[1]))) - s.Require().NoError(err) - s.cluster2 = c - */ -} - -func (s *integrationClustersTestSuite) SetupTest() { - // Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil - s.Assertions = require.New(s.T()) -} - -func (s *integrationClustersTestSuite) TearDownSuite() { - s.cluster1.TearDownCluster() - s.cluster2.TearDownCluster() -} - -func (s *integrationClustersTestSuite) TestDomainFailover() { - domainName := "test-domain-for-fail-over-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 7, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - resp2, err := client2.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp2) - s.Equal(resp, resp2) - - // update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - updated := false - var resp3 *types.DescribeDomainResponse - for i := 0; i < 30; i++ { - resp3, err = client2.DescribeDomain(createContext(), descReq) - s.NoError(err) - if resp3.ReplicationConfiguration.GetActiveClusterName() == clusterName[1] { - updated = true - break - } - time.Sleep(500 * time.Millisecond) - } - s.True(updated) - s.NotNil(resp3) - s.Equal(int64(1), resp3.GetFailoverVersion()) - - // start workflow in new cluster - id := "integration-domain-failover-test" - wt := "integration-domain-failover-test-type" - tl := "integration-domain-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - } - var we *types.StartWorkflowExecutionResponse - for i := 0; i < 30; i++ { - we, err = client2.StartWorkflowExecution(createContext(), startReq) - if err == nil { - break - } - time.Sleep(500 * time.Millisecond) - } - s.NoError(err) - s.NotNil(we.GetRunID()) -} - -func (s *integrationClustersTestSuite) TestSimpleWorkflowFailover() { - domainName := "test-simple-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - 
time.Sleep(cache.DomainCacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - resp2, err := client2.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp2) - s.Equal(resp, resp2) - - // start a workflow - id := "integration-simple-workflow-failover-test" - wt := "integration-simple-workflow-failover-test-type" - tl := "integration-simple-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.Nil(err) - s.NotNil(we.GetRunID()) - rid := we.GetRunID() - - s.logger.Info("StartWorkflowExecution \n", tag.WorkflowRunID(we.GetRunID())) - - workflowComplete := false - activityName := "activity_type1" - activityCount := int32(1) - activityCounter := int32(0) - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if activityCounter < activityCount { - activityCounter++ - buf := new(bytes.Buffer) - s.Nil(binary.Write(buf, binary.LittleEndian, activityCounter)) - - return []byte(strconv.Itoa(int(activityCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeScheduleActivityTask.Ptr(), - ScheduleActivityTaskDecisionAttributes: &types.ScheduleActivityTaskDecisionAttributes{ - ActivityID: strconv.Itoa(int(activityCounter)), - ActivityType: &types.ActivityType{Name: activityName}, - TaskList: &types.TaskList{Name: tl}, - Input: buf.Bytes(), - ScheduleToCloseTimeoutSeconds: common.Int32Ptr(100), - ScheduleToStartTimeoutSeconds: common.Int32Ptr(30), - StartToCloseTimeoutSeconds: common.Int32Ptr(50), - HeartbeatTimeoutSeconds: common.Int32Ptr(20), - }, - }}, nil - } - - workflowComplete = true - return []byte(strconv.Itoa(int(activityCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - atHandler := func(execution *types.WorkflowExecution, activityType *types.ActivityType, - activityID string, input []byte, taskToken []byte) ([]byte, bool, error) { - - return []byte("Activity Result."), false, nil - } - - queryType := "test-query" - queryHandler := func(task *types.PollForDecisionTaskResponse) ([]byte, error) { - s.NotNil(task.Query) - s.NotNil(task.Query.QueryType) - if task.Query.QueryType == queryType { - return []byte("query-result"), nil - } - - return nil, errors.New("unknown-query-type") - } - - poller := host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - ActivityHandler: atHandler, - QueryHandler: queryHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - ActivityHandler: atHandler, - QueryHandler: queryHandler, - Logger: s.logger, - T: s.T(), - } - - // make some progress in cluster 1 - _, err = 
poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - - type QueryResult struct { - Resp *types.QueryWorkflowResponse - Err error - } - queryResultCh := make(chan QueryResult) - queryWorkflowFn := func(client frontend.Client, queryType string) { - queryResp, err := client.QueryWorkflow(createContext(), &types.QueryWorkflowRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.RunID, - }, - Query: &types.WorkflowQuery{ - QueryType: queryType, - }, - }) - queryResultCh <- QueryResult{Resp: queryResp, Err: err} - } - - // call QueryWorkflow in separate goroutinue (because it is blocking). That will generate a query task - go queryWorkflowFn(client1, queryType) - // process that query task, which should respond via RespondQueryTaskCompleted - for { - // loop until process the query task - isQueryTask, errInner := poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessQueryTask", tag.Error(err)) - s.Nil(errInner) - if isQueryTask { - break - } - } - // wait until query result is ready - queryResult := <-queryResultCh - s.NoError(queryResult.Err) - s.NotNil(queryResult.Resp) - s.NotNil(queryResult.Resp.QueryResult) - queryResultString := string(queryResult.Resp.QueryResult) - s.Equal("query-result", queryResultString) - - // Wait a while so the events are replicated. - time.Sleep(5 * time.Second) - - // call QueryWorkflow in separate goroutinue (because it is blocking). That will generate a query task - go queryWorkflowFn(client2, queryType) - // process that query task, which should respond via RespondQueryTaskCompleted - for { - // loop until process the query task - isQueryTask, errInner := poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessQueryTask", tag.Error(err)) - s.Nil(errInner) - if isQueryTask { - break - } - } - // wait until query result is ready - queryResult = <-queryResultCh - s.NoError(queryResult.Err) - s.NotNil(queryResult.Resp) - s.NotNil(queryResult.Resp.QueryResult) - queryResultString = string(queryResult.Resp.QueryResult) - s.Equal("query-result", queryResultString) - - // update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // wait till failover completed - time.Sleep(cacheRefreshInterval) - - // check history matched - getHistoryReq := &types.GetWorkflowExecutionHistoryRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: rid, - }, - } - var historyResponse *types.GetWorkflowExecutionHistoryResponse - eventsReplicated := false - for i := 0; i < 15; i++ { - historyResponse, err = client2.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - if err == nil && len(historyResponse.History.Events) == 5 { - eventsReplicated = true - break - } - time.Sleep(1 * time.Second) - } - s.Nil(err) - s.True(eventsReplicated) - - // Make sure query is still working after failover - // call QueryWorkflow in separate goroutinue (because it is blocking). 
That will generate a query task - go queryWorkflowFn(client1, queryType) - // process that query task, which should respond via RespondQueryTaskCompleted - for { - // loop until process the query task - isQueryTask, errInner := poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(errInner) - if isQueryTask { - break - } - } - // wait until query result is ready - queryResult = <-queryResultCh - s.NoError(queryResult.Err) - s.NotNil(queryResult.Resp) - s.NotNil(queryResult.Resp.QueryResult) - queryResultString = string(queryResult.Resp.QueryResult) - s.Equal("query-result", queryResultString) - - // call QueryWorkflow in separate goroutinue (because it is blocking). That will generate a query task - go queryWorkflowFn(client2, queryType) - // process that query task, which should respond via RespondQueryTaskCompleted - for { - // loop until process the query task - isQueryTask, errInner := poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(errInner) - if isQueryTask { - break - } - } - // wait until query result is ready - queryResult = <-queryResultCh - s.NoError(queryResult.Err) - s.NotNil(queryResult.Resp) - s.NotNil(queryResult.Resp.QueryResult) - queryResultString = string(queryResult.Resp.QueryResult) - s.Equal("query-result", queryResultString) - - // make process in cluster 2 - err = poller2.PollAndProcessActivityTask(false) - s.logger.Info("PollAndProcessActivityTask 2", tag.Error(err)) - s.Nil(err) - - s.False(workflowComplete) - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask 2", tag.Error(err)) - s.Nil(err) - s.True(workflowComplete) - - // check history replicated in cluster 1 - eventsReplicated = false - for i := 0; i < 15; i++ { - historyResponse, err = client1.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - if err == nil && len(historyResponse.History.Events) == 11 { - eventsReplicated = true - break - } - time.Sleep(1 * time.Second) - } - s.Nil(err) - s.True(eventsReplicated) -} - -func (s *integrationClustersTestSuite) TestStickyDecisionFailover() { - domainName := "test-sticky-decision-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // Start a workflow - id := "integration-sticky-decision-workflow-failover-test" - wt := "integration-sticky-decision-workflow-failover-test-type" - tl := "integration-sticky-decision-workflow-failover-test-tasklist" - stl1 := "integration-sticky-decision-workflow-failover-test-tasklist-sticky1" - stl2 := "integration-sticky-decision-workflow-failover-test-tasklist-sticky2" - identity1 := "worker1" - identity2 := "worker2" - - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - stickyTaskList1 := &types.TaskList{Name: stl1} - stickyTaskList2 := 
&types.TaskList{Name: stl2} - stickyTaskTimeout := common.Int32Ptr(100) - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(2592000), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(60), - Identity: identity1, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - s.logger.Info("StartWorkflowExecution", tag.WorkflowRunID(we.GetRunID())) - - firstDecisionMade := false - secondDecisionMade := false - workflowCompleted := false - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if !firstDecisionMade { - firstDecisionMade = true - return nil, []*types.Decision{}, nil - } - - if !secondDecisionMade { - secondDecisionMade = true - return nil, []*types.Decision{}, nil - } - - workflowCompleted = true - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller1 := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - StickyTaskList: stickyTaskList1, - StickyScheduleToStartTimeoutSeconds: stickyTaskTimeout, - Identity: identity1, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := &host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - StickyTaskList: stickyTaskList2, - StickyScheduleToStartTimeoutSeconds: stickyTaskTimeout, - Identity: identity2, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - _, err = poller1.PollAndProcessDecisionTaskWithAttemptAndRetry(false, false, false, true, 0, 5) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - s.True(firstDecisionMade) - - // Send a signal in cluster - signalName := "my signal" - signalInput := []byte("my signal input.") - err = client1.SignalWorkflowExecution(createContext(), &types.SignalWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.GetRunID(), - }, - SignalName: signalName, - Input: signalInput, - Identity: identity1, - }) - s.Nil(err) - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - _, err = poller2.PollAndProcessDecisionTaskWithAttemptAndRetry(false, false, false, true, 0, 5) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - s.True(secondDecisionMade) - - err = client2.SignalWorkflowExecution(createContext(), &types.SignalWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.GetRunID(), - }, - SignalName: signalName, - Input: signalInput, - Identity: identity2, - }) - s.Nil(err) - - // Update domain to fail over back - updateReq 
= &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[0]), - } - updateResp, err = client2.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[0], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(10), updateResp.GetFailoverVersion()) - - _, err = poller1.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - s.True(workflowCompleted) -} - -func (s *integrationClustersTestSuite) TestStartWorkflowExecution_Failover_WorkflowIDReusePolicy() { - domainName := "test-start-workflow-failover-ID-reuse-policy" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cache.DomainCacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - resp2, err := client2.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp2) - s.Equal(resp, resp2) - - // start a workflow - id := "integration-start-workflow-failover-ID-reuse-policy-test" - wt := "integration-start-workflow-failover-ID-reuse-policy-test-type" - tl := "integration-start-workflow-failover-ID-reuse-policy-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - WorkflowIDReusePolicy: types.WorkflowIDReusePolicyAllowDuplicate.Ptr(), - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.Nil(err) - s.NotNil(we.GetRunID()) - s.logger.Info("StartWorkflowExecution in cluster 1: ", tag.WorkflowRunID(we.GetRunID())) - - workflowCompleteTimes := 0 - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - - workflowCompleteTimes++ - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller := host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - ActivityHandler: nil, - Logger: s.logger, - T: s.T(), - } - - poller2 := host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - ActivityHandler: nil, - Logger: s.logger, - T: s.T(), - } - - // Complete the workflow in cluster 1 - _, err = poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) 
- s.Equal(1, workflowCompleteTimes) - - // update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // wait till failover completed - time.Sleep(cacheRefreshInterval) - - // start the same workflow in cluster 2 is not allowed if policy is AllowDuplicateFailedOnly - startReq.RequestID = uuid.New() - startReq.WorkflowIDReusePolicy = types.WorkflowIDReusePolicyAllowDuplicateFailedOnly.Ptr() - we, err = client2.StartWorkflowExecution(createContext(), startReq) - s.IsType(&types.WorkflowExecutionAlreadyStartedError{}, err) - s.Nil(we) - - // start the same workflow in cluster 2 is not allowed if policy is RejectDuplicate - startReq.RequestID = uuid.New() - startReq.WorkflowIDReusePolicy = types.WorkflowIDReusePolicyRejectDuplicate.Ptr() - we, err = client2.StartWorkflowExecution(createContext(), startReq) - s.IsType(&types.WorkflowExecutionAlreadyStartedError{}, err) - s.Nil(we) - - // start the workflow in cluster 2 - startReq.RequestID = uuid.New() - startReq.WorkflowIDReusePolicy = types.WorkflowIDReusePolicyAllowDuplicate.Ptr() - we, err = client2.StartWorkflowExecution(createContext(), startReq) - s.Nil(err) - s.NotNil(we.GetRunID()) - s.logger.Info("StartWorkflowExecution in cluster 2: ", tag.WorkflowRunID(we.GetRunID())) - - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask 2", tag.Error(err)) - s.Nil(err) - s.Equal(2, workflowCompleteTimes) -} - -func (s *integrationClustersTestSuite) TestTerminateFailover() { - domainName := "test-terminate-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // start a workflow - id := "integration-terminate-workflow-failover-test" - wt := "integration-terminate-workflow-failover-test-type" - tl := "integration-terminate-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - activityName := "activity_type1" - activityCount := int32(1) - activityCounter := int32(0) - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID 
int64, history *types.History) ([]byte, []*types.Decision, error) { - if activityCounter < activityCount { - activityCounter++ - buf := new(bytes.Buffer) - s.Nil(binary.Write(buf, binary.LittleEndian, activityCounter)) - - return []byte(strconv.Itoa(int(activityCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeScheduleActivityTask.Ptr(), - ScheduleActivityTaskDecisionAttributes: &types.ScheduleActivityTaskDecisionAttributes{ - ActivityID: strconv.Itoa(int(activityCounter)), - ActivityType: &types.ActivityType{Name: activityName}, - TaskList: &types.TaskList{Name: tl}, - Input: buf.Bytes(), - ScheduleToCloseTimeoutSeconds: common.Int32Ptr(100), - ScheduleToStartTimeoutSeconds: common.Int32Ptr(10), - StartToCloseTimeoutSeconds: common.Int32Ptr(50), - HeartbeatTimeoutSeconds: common.Int32Ptr(5), - }, - }}, nil - } - - return []byte(strconv.Itoa(int(activityCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - atHandler := func(execution *types.WorkflowExecution, activityType *types.ActivityType, - activityID string, input []byte, taskToken []byte) ([]byte, bool, error) { - - return []byte("Activity Result."), false, nil - } - - poller := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - ActivityHandler: atHandler, - Logger: s.logger, - T: s.T(), - } - - // make some progress in cluster 1 - _, err = poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - - // update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // wait till failover completed - time.Sleep(cacheRefreshInterval) - - // terminate workflow at cluster 2 - terminateReason := "terminate reason." 
- terminateDetails := []byte("terminate details.") - err = client2.TerminateWorkflowExecution(createContext(), &types.TerminateWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - }, - Reason: terminateReason, - Details: terminateDetails, - Identity: identity, - }) - s.Nil(err) - - // check terminate done - executionTerminated := false - getHistoryReq := &types.GetWorkflowExecutionHistoryRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - }, - } -GetHistoryLoop: - for i := 0; i < 10; i++ { - historyResponse, err := client2.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - s.Nil(err) - history := historyResponse.History - - lastEvent := history.Events[len(history.Events)-1] - if *lastEvent.EventType != types.EventTypeWorkflowExecutionTerminated { - s.logger.Warn("Execution not terminated yet.") - time.Sleep(100 * time.Millisecond) - continue GetHistoryLoop - } - - terminateEventAttributes := lastEvent.WorkflowExecutionTerminatedEventAttributes - s.Equal(terminateReason, terminateEventAttributes.Reason) - s.Equal(terminateDetails, terminateEventAttributes.Details) - s.Equal(identity, terminateEventAttributes.Identity) - executionTerminated = true - break GetHistoryLoop - } - s.True(executionTerminated) - - // check history replicated to the other cluster - var historyResponse *types.GetWorkflowExecutionHistoryResponse - eventsReplicated := false -GetHistoryLoop2: - for i := 0; i < 15; i++ { - historyResponse, err = client1.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - if err == nil { - history := historyResponse.History - lastEvent := history.Events[len(history.Events)-1] - if *lastEvent.EventType == types.EventTypeWorkflowExecutionTerminated { - terminateEventAttributes := lastEvent.WorkflowExecutionTerminatedEventAttributes - s.Equal(terminateReason, terminateEventAttributes.Reason) - s.Equal(terminateDetails, terminateEventAttributes.Details) - s.Equal(identity, terminateEventAttributes.Identity) - eventsReplicated = true - break GetHistoryLoop2 - } - } - time.Sleep(1 * time.Second) - } - s.Nil(err) - s.True(eventsReplicated) -} - -func (s *integrationClustersTestSuite) TestContinueAsNewFailover() { - domainName := "test-continueAsNew-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // start a workflow - id := "integration-continueAsNew-workflow-failover-test" - wt := "integration-continueAsNew-workflow-failover-test-type" - tl := "integration-continueAsNew-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - 
ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - workflowComplete := false - continueAsNewCount := int32(5) - continueAsNewCounter := int32(0) - var previousRunID string - var lastRunStartedEvent *types.HistoryEvent - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if continueAsNewCounter < continueAsNewCount { - previousRunID = execution.GetRunID() - continueAsNewCounter++ - buf := new(bytes.Buffer) - s.Nil(binary.Write(buf, binary.LittleEndian, continueAsNewCounter)) - - return []byte(strconv.Itoa(int(continueAsNewCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeContinueAsNewWorkflowExecution.Ptr(), - ContinueAsNewWorkflowExecutionDecisionAttributes: &types.ContinueAsNewWorkflowExecutionDecisionAttributes{ - WorkflowType: workflowType, - TaskList: &types.TaskList{Name: tl}, - Input: buf.Bytes(), - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(10), - }, - }}, nil - } - - lastRunStartedEvent = history.Events[0] - workflowComplete = true - return []byte(strconv.Itoa(int(continueAsNewCounter))), []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - // make some progress in cluster 1 and did some continueAsNew - for i := 0; i < 3; i++ { - _, err := poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err, strconv.Itoa(i)) - } - - // update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // wait till failover completed - time.Sleep(cacheRefreshInterval) - - // finish the rest in cluster 2 - for i := 0; i < 2; i++ { - _, err := poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err, strconv.Itoa(i)) - } - - s.False(workflowComplete) - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - s.True(workflowComplete) - s.Equal(previousRunID, lastRunStartedEvent.WorkflowExecutionStartedEventAttributes.GetContinuedExecutionRunID()) -} - -func (s *integrationClustersTestSuite) TestSignalFailover() { - domainName := "test-signal-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, 
- ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // Start a workflow - id := "integration-signal-workflow-failover-test" - wt := "integration-signal-workflow-failover-test-type" - tl := "integration-signal-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(300), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - s.logger.Info("StartWorkflowExecution", tag.WorkflowRunID(we.GetRunID())) - - eventSignaled := false - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if !eventSignaled { - for _, event := range history.Events[previousStartedEventID:] { - if *event.EventType == types.EventTypeWorkflowExecutionSignaled { - eventSignaled = true - return nil, []*types.Decision{}, nil - } - } - } - - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := &host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - // Send a signal in cluster 1 - signalName := "my signal" - signalInput := []byte("my signal input.") - err = client1.SignalWorkflowExecution(createContext(), &types.SignalWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.GetRunID(), - }, - SignalName: signalName, - Input: signalInput, - Identity: identity, - }) - s.Nil(err) - - // Process signal in cluster 1 - s.False(eventSignaled) - _, err = poller.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) - s.Nil(err) - s.True(eventSignaled) - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - // check history matched - getHistoryReq := 
&types.GetWorkflowExecutionHistoryRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - }, - } - var historyResponse *types.GetWorkflowExecutionHistoryResponse - eventsReplicated := false - for i := 0; i < 15; i++ { - historyResponse, err = client2.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - if err == nil && len(historyResponse.History.Events) == 5 { - eventsReplicated = true - break - } - time.Sleep(1 * time.Second) - } - s.Nil(err) - s.True(eventsReplicated) - - // Send another signal in cluster 2 - signalName2 := "my signal 2" - signalInput2 := []byte("my signal input 2.") - err = client2.SignalWorkflowExecution(createContext(), &types.SignalWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - }, - SignalName: signalName2, - Input: signalInput2, - Identity: identity, - }) - s.Nil(err) - - // Process signal in cluster 2 - eventSignaled = false - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.logger.Info("PollAndProcessDecisionTask 2", tag.Error(err)) - s.Nil(err) - s.True(eventSignaled) - - // check history matched - eventsReplicated = false - for i := 0; i < 15; i++ { - historyResponse, err = client2.GetWorkflowExecutionHistory(createContext(), getHistoryReq) - if err == nil && len(historyResponse.History.Events) == 9 { - eventsReplicated = true - break - } - time.Sleep(1 * time.Second) - } - s.Nil(err) - s.True(eventsReplicated) -} - -func (s *integrationClustersTestSuite) TestUserTimerFailover() { - domainName := "test-user-timer-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // Start a workflow - id := "integration-user-timer-workflow-failover-test" - wt := "integration-user-timer-workflow-failover-test-type" - tl := "integration-user-timer-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(300), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(10), - Identity: identity, - } - var we *types.StartWorkflowExecutionResponse - for i := 0; i < 10; i++ { - we, err = client1.StartWorkflowExecution(createContext(), startReq) - if err == nil { - break - } - time.Sleep(1 * time.Second) - } - s.NoError(err) - s.NotNil(we.GetRunID()) - - s.logger.Info("StartWorkflowExecution", tag.WorkflowRunID(we.GetRunID())) - - timerCreated := false - timerFired := false - workflowCompleted := false - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - - if 
!timerCreated { - timerCreated = true - - // Send a signal in cluster - signalName := "my signal" - signalInput := []byte("my signal input.") - err = client1.SignalWorkflowExecution(createContext(), &types.SignalWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.GetRunID(), - }, - SignalName: signalName, - Input: signalInput, - Identity: "", - }) - s.Nil(err) - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeStartTimer.Ptr(), - StartTimerDecisionAttributes: &types.StartTimerDecisionAttributes{ - TimerID: "timer-id", - StartToFireTimeoutSeconds: common.Int64Ptr(2), - }, - }}, nil - } - - if !timerFired { - resp, err := client2.GetWorkflowExecutionHistory(createContext(), &types.GetWorkflowExecutionHistoryRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.GetRunID(), - }, - }) - s.Nil(err) - for _, event := range resp.History.Events { - if event.GetEventType() == types.EventTypeTimerFired { - timerFired = true - } - } - if !timerFired { - return nil, []*types.Decision{}, nil - } - } - - workflowCompleted = true - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller1 := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := &host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - for i := 0; i < 2; i++ { - _, err = poller1.PollAndProcessDecisionTask(false, false) - if err != nil { - timerCreated = false - continue - } - if timerCreated { - break - } - } - s.True(timerCreated) - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - for i := 1; i < 20; i++ { - if !workflowCompleted { - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - time.Sleep(time.Second) - } - } -} - -func (s *integrationClustersTestSuite) TestActivityHeartbeatFailover() { - domainName := "test-activity-heartbeat-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // Start a workflow - id := 
"integration-activity-heartbeat-workflow-failover-test" - wt := "integration-activity-heartbeat-workflow-failover-test-type" - tl := "integration-activity-heartbeat-workflow-failover-test-tasklist" - identity1 := "worker1" - identity2 := "worker2" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(300), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(10), - Identity: identity1, - } - var we *types.StartWorkflowExecutionResponse - for i := 0; i < 10; i++ { - we, err = client1.StartWorkflowExecution(createContext(), startReq) - if err == nil { - break - } - time.Sleep(1 * time.Second) - } - s.NoError(err) - s.NotNil(we.GetRunID()) - - s.logger.Info("StartWorkflowExecution", tag.WorkflowRunID(we.GetRunID())) - - activitySent := false - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if !activitySent { - activitySent = true - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeScheduleActivityTask.Ptr(), - ScheduleActivityTaskDecisionAttributes: &types.ScheduleActivityTaskDecisionAttributes{ - ActivityID: "1", - ActivityType: &types.ActivityType{Name: "some random activity type"}, - TaskList: &types.TaskList{Name: tl}, - Input: []byte("some random input"), - ScheduleToCloseTimeoutSeconds: common.Int32Ptr(1000), - ScheduleToStartTimeoutSeconds: common.Int32Ptr(1000), - StartToCloseTimeoutSeconds: common.Int32Ptr(1000), - HeartbeatTimeoutSeconds: common.Int32Ptr(3), - RetryPolicy: &types.RetryPolicy{ - InitialIntervalInSeconds: 1, - MaximumAttempts: 3, - MaximumIntervalInSeconds: 1, - NonRetriableErrorReasons: []string{"bad-bug"}, - BackoffCoefficient: 1, - ExpirationIntervalInSeconds: 100, - }, - }, - }}, nil - } - - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - // activity handler - activity1Called := false - heartbeatDetails := []byte("details") - atHandler1 := func(execution *types.WorkflowExecution, activityType *types.ActivityType, - activityID string, input []byte, taskToken []byte) ([]byte, bool, error) { - activity1Called = true - _, err = client1.RecordActivityTaskHeartbeat(createContext(), &types.RecordActivityTaskHeartbeatRequest{ - TaskToken: taskToken, Details: heartbeatDetails}) - s.Nil(err) - time.Sleep(5 * time.Second) - return []byte("Activity Result."), false, nil - } - - // activity handler - activity2Called := false - atHandler2 := func(execution *types.WorkflowExecution, activityType *types.ActivityType, - activityID string, input []byte, taskToken []byte) ([]byte, bool, error) { - activity2Called = true - return []byte("Activity Result."), false, nil - } - - poller1 := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity1, - DecisionHandler: dtHandler, - ActivityHandler: atHandler1, - Logger: s.logger, - T: s.T(), - } - - poller2 := &host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity2, - DecisionHandler: dtHandler, - ActivityHandler: atHandler2, - 
Logger: s.logger, - T: s.T(), - } - - describeWorkflowExecution := func(client frontend.Client) (*types.DescribeWorkflowExecutionResponse, error) { - return client.DescribeWorkflowExecution(createContext(), &types.DescribeWorkflowExecutionRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - RunID: we.RunID, - }, - }) - } - - _, err = poller1.PollAndProcessDecisionTask(false, false) - s.Nil(err) - err = poller1.PollAndProcessActivityTask(false) - s.IsType(&types.EntityNotExistsError{}, err) - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - // Make sure the heartbeat details are sent to cluster2 even when the activity at cluster1 - // has heartbeat timeout. Also make sure the information is recorded when the activity state - // is "Scheduled" - dweResponse, err := describeWorkflowExecution(client2) - s.Nil(err) - pendingActivities := dweResponse.GetPendingActivities() - s.Equal(1, len(pendingActivities)) - s.Equal(types.PendingActivityStateScheduled, pendingActivities[0].GetState()) - s.Equal(heartbeatDetails, pendingActivities[0].GetHeartbeatDetails()) - s.Equal("cadenceInternal:Timeout HEARTBEAT", pendingActivities[0].GetLastFailureReason()) - s.Equal(identity1, pendingActivities[0].GetLastWorkerIdentity()) - - for i := 0; i < 10; i++ { - poller2.PollAndProcessActivityTask(false) - if activity2Called { - break - } else { - time.Sleep(1 * time.Second) - } - } - - s.True(activity1Called) - s.True(activity2Called) - - historyResponse, err := client2.GetWorkflowExecutionHistory(createContext(), &types.GetWorkflowExecutionHistoryRequest{ - Domain: domainName, - Execution: &types.WorkflowExecution{ - WorkflowID: id, - }, - }) - s.Nil(err) - history := historyResponse.History - - activityRetryFound := false - for _, event := range history.Events { - if event.GetEventType() == types.EventTypeActivityTaskStarted { - attribute := event.ActivityTaskStartedEventAttributes - s.True(attribute.GetAttempt() > 0) - activityRetryFound = true - } - } - s.True(activityRetryFound) -} - -func (s *integrationClustersTestSuite) TestTransientDecisionFailover() { - domainName := "test-transient-decision-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // Start a workflow - id := "integration-transient-decision-workflow-failover-test" - wt := "integration-transient-decision-workflow-failover-test-type" - tl := "integration-transient-decision-workflow-failover-test-tasklist" - identity := 
"worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(300), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(8), - Identity: identity, - } - var we *types.StartWorkflowExecutionResponse - for i := 0; i < 10; i++ { - we, err = client1.StartWorkflowExecution(createContext(), startReq) - if err == nil { - break - } - time.Sleep(1 * time.Second) - } - s.NoError(err) - s.NotNil(we.GetRunID()) - - s.logger.Info("StartWorkflowExecution", tag.WorkflowRunID(we.GetRunID())) - - decisionFailed := false - workflowFinished := false - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - if !decisionFailed { - decisionFailed = true - return nil, nil, errors.New("random fail decision reason") - } - - workflowFinished = true - return nil, []*types.Decision{{ - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("Done."), - }, - }}, nil - } - - poller1 := &host.TaskPoller{ - Engine: client1, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - poller2 := &host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - // this will fail the decision - _, err = poller1.PollAndProcessDecisionTask(false, false) - s.Nil(err) - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - // for failover transient decision, it is guaranteed that the transient decision - // after the failover has attempt 0 - // for details see ReplicateTransientDecisionTaskScheduled - _, err = poller2.PollAndProcessDecisionTaskWithAttempt(false, false, false, false, 0) - s.Nil(err) - s.True(workflowFinished) -} - -func (s *integrationClustersTestSuite) TestCronWorkflowFailover() { - domainName := "test-cron-workflow-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // start a workflow - id := "integration-cron-workflow-failover-test" - wt 
:= "integration-cron-workflow-failover-test-type" - tl := "integration-cron-workflow-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - CronSchedule: "@every 5s", - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - return nil, []*types.Decision{ - { - DecisionType: types.DecisionTypeCompleteWorkflowExecution.Ptr(), - CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{ - Result: []byte("cron-test-result"), - }, - }}, nil - } - - poller2 := host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - // Failover during backoff - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - // Run twice to make sure cron schedule is passed to standby. 
- for i := 0; i < 2; i++ { - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - } - - err = client2.TerminateWorkflowExecution(createContext(), &types.TerminateWorkflowExecutionRequest{ - Domain: domainName, - WorkflowExecution: &types.WorkflowExecution{ - WorkflowID: id, - }, - }) - s.Nil(err) -} - -func (s *integrationClustersTestSuite) TestWorkflowRetryFailover() { - domainName := "test-workflow-retry-failover-" + common.GenerateRandomString(5) - client1 := s.cluster1.GetFrontendClient() // active - regReq := &types.RegisterDomainRequest{ - Name: domainName, - IsGlobalDomain: true, - Clusters: clusterReplicationConfig, - ActiveClusterName: clusterName[0], - WorkflowExecutionRetentionPeriodInDays: 1, - } - err := client1.RegisterDomain(createContext(), regReq) - s.NoError(err) - - descReq := &types.DescribeDomainRequest{ - Name: common.StringPtr(domainName), - } - resp, err := client1.DescribeDomain(createContext(), descReq) - s.NoError(err) - s.NotNil(resp) - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - client2 := s.cluster2.GetFrontendClient() // standby - - // start a workflow - id := "integration-workflow-retry-failover-test" - wt := "integration-workflow-retry-failover-test-type" - tl := "integration-workflow-retry-failover-test-tasklist" - identity := "worker1" - workflowType := &types.WorkflowType{Name: wt} - taskList := &types.TaskList{Name: tl} - startReq := &types.StartWorkflowExecutionRequest{ - RequestID: uuid.New(), - Domain: domainName, - WorkflowID: id, - WorkflowType: workflowType, - TaskList: taskList, - Input: nil, - ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), - TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), - Identity: identity, - RetryPolicy: &types.RetryPolicy{ - InitialIntervalInSeconds: 1, - MaximumAttempts: 3, - MaximumIntervalInSeconds: 1, - NonRetriableErrorReasons: []string{"bad-bug"}, - BackoffCoefficient: 1, - ExpirationIntervalInSeconds: 100, - }, - } - we, err := client1.StartWorkflowExecution(createContext(), startReq) - s.NoError(err) - s.NotNil(we.GetRunID()) - - executions := []*types.WorkflowExecution{} - dtHandler := func(execution *types.WorkflowExecution, wt *types.WorkflowType, - previousStartedEventID, startedEventID int64, history *types.History) ([]byte, []*types.Decision, error) { - executions = append(executions, execution) - return nil, []*types.Decision{ - { - DecisionType: types.DecisionTypeFailWorkflowExecution.Ptr(), - FailWorkflowExecutionDecisionAttributes: &types.FailWorkflowExecutionDecisionAttributes{ - Reason: common.StringPtr("retryable-error"), - Details: nil, - }, - }}, nil - } - - poller2 := host.TaskPoller{ - Engine: client2, - Domain: domainName, - TaskList: taskList, - Identity: identity, - DecisionHandler: dtHandler, - Logger: s.logger, - T: s.T(), - } - - // Update domain to fail over - updateReq := &types.UpdateDomainRequest{ - Name: domainName, - ActiveClusterName: common.StringPtr(clusterName[1]), - } - updateResp, err := client1.UpdateDomain(createContext(), updateReq) - s.NoError(err) - s.NotNil(updateResp) - s.Equal(clusterName[1], updateResp.ReplicationConfiguration.GetActiveClusterName()) - s.Equal(int64(1), updateResp.GetFailoverVersion()) - - // Wait for domain cache to pick the change - time.Sleep(cacheRefreshInterval) - - // First attempt - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - events := s.getHistory(client2, domainName, executions[0]) - s.Equal(types.EventTypeWorkflowExecutionContinuedAsNew, 
events[len(events)-1].GetEventType()) - s.Equal(int32(0), events[0].GetWorkflowExecutionStartedEventAttributes().GetAttempt()) - - // second attempt - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - events = s.getHistory(client2, domainName, executions[1]) - s.Equal(types.EventTypeWorkflowExecutionContinuedAsNew, events[len(events)-1].GetEventType()) - s.Equal(int32(1), events[0].GetWorkflowExecutionStartedEventAttributes().GetAttempt()) - - // third attempt. Still failing, should stop retry. - _, err = poller2.PollAndProcessDecisionTask(false, false) - s.Nil(err) - events = s.getHistory(client2, domainName, executions[2]) - s.Equal(types.EventTypeWorkflowExecutionFailed, events[len(events)-1].GetEventType()) - s.Equal(int32(2), events[0].GetWorkflowExecutionStartedEventAttributes().GetAttempt()) -} - -func (s *integrationClustersTestSuite) getHistory(client host.FrontendClient, domain string, execution *types.WorkflowExecution) []*types.HistoryEvent { - historyResponse, err := client.GetWorkflowExecutionHistory(createContext(), &types.GetWorkflowExecutionHistoryRequest{ - Domain: domain, - Execution: execution, - MaximumPageSize: 5, // Use small page size to force pagination code path - }) - s.Nil(err) - - events := historyResponse.History.Events - for historyResponse.NextPageToken != nil { - historyResponse, err = client.GetWorkflowExecutionHistory(createContext(), &types.GetWorkflowExecutionHistoryRequest{ - Domain: domain, - Execution: execution, - NextPageToken: historyResponse.NextPageToken, - }) - s.Nil(err) - events = append(events, historyResponse.History.Events...) - } - - return events -} diff --git a/service/frontend/adminHandler.go b/service/frontend/adminHandler.go index b5edf37a00c..63bc10feec8 100644 --- a/service/frontend/adminHandler.go +++ b/service/frontend/adminHandler.go @@ -38,7 +38,6 @@ import ( "github.com/uber/cadence/common/definition" "github.com/uber/cadence/common/domain" "github.com/uber/cadence/common/dynamicconfig" - dc "github.com/uber/cadence/common/dynamicconfig" "github.com/uber/cadence/common/elasticsearch" "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/tag" @@ -57,8 +56,7 @@ const ( ) var ( - errMaxMessageIDNotSet = &types.BadRequestError{Message: "Max messageID is not set."} - errInvalidFilters = &types.BadRequestError{Message: "Request Filters are invalid, unable to parse."} + errInvalidFilters = &types.BadRequestError{Message: "Request Filters are invalid, unable to parse."} ) type ( @@ -198,7 +196,7 @@ func (adh *adminHandlerImpl) AddSearchAttribute( return adh.error(&types.BadRequestError{Message: "SearchAttributes are not provided"}, scope) } if err := adh.validateConfigForAdvanceVisibility(); err != nil { - return adh.error(&types.BadRequestError{Message: fmt.Sprintf("AdvancedVisibilityStore is not configured for this Cadence Cluster")}, scope) + return adh.error(&types.BadRequestError{Message: "AdvancedVisibilityStore is not configured for this Cadence Cluster"}, scope) } searchAttr := request.GetSearchAttribute() @@ -278,6 +276,9 @@ func (adh *adminHandlerImpl) DescribeWorkflowExecution( } domainID, err := adh.GetDomainCache().GetDomainID(request.GetDomain()) + if err != nil { + return nil, adh.error(err, scope) + } historyAddr := historyHost.GetAddress() resp2, err := adh.GetHistoryClient().DescribeMutableState(ctx, &types.DescribeMutableStateRequest{ @@ -1301,6 +1302,9 @@ func (adh *adminHandlerImpl) GetDynamicConfig(ctx context.Context, request *type return nil, adh.error(err, scope) } 
value, err = adh.params.DynamicConfig.GetValueWithFilters(keyVal, convFilters, nil) + if err != nil { + return nil, adh.error(err, scope) + } } data, err := json.Marshal(value) @@ -1347,7 +1351,7 @@ func (adh *adminHandlerImpl) RestoreDynamicConfig(ctx context.Context, request * return adh.error(err, scope) } - var filters map[dc.Filter]interface{} + var filters map[dynamicconfig.Filter]interface{} if request.Filters == nil { filters = nil @@ -1371,7 +1375,7 @@ func (adh *adminHandlerImpl) ListDynamicConfig(ctx context.Context, request *typ keyVal, err := checkValidKey(request.ConfigName) if err != nil { - entries, err2 := adh.params.DynamicConfig.ListValue(dc.UnknownKey) + entries, err2 := adh.params.DynamicConfig.ListValue(dynamicconfig.UnknownKey) if err2 != nil { return nil, adh.error(err2, scope) } @@ -1410,15 +1414,15 @@ func convertFromDataBlob(blob *types.DataBlob) (interface{}, error) { } } -func convertFilterListToMap(filters []*types.DynamicConfigFilter) (map[dc.Filter]interface{}, error) { - newFilters := make(map[dc.Filter]interface{}) +func convertFilterListToMap(filters []*types.DynamicConfigFilter) (map[dynamicconfig.Filter]interface{}, error) { + newFilters := make(map[dynamicconfig.Filter]interface{}) for _, filter := range filters { val, err := convertFromDataBlob(filter.Value) if err != nil { return nil, err } - newFilters[dc.ParseFilter(filter.Name)] = val + newFilters[dynamicconfig.ParseFilter(filter.Name)] = val } return newFilters, nil } diff --git a/service/frontend/clusterRedirectionHandler_test.go b/service/frontend/clusterRedirectionHandler_test.go index 3d6dac0a648..eed6ec12941 100644 --- a/service/frontend/clusterRedirectionHandler_test.go +++ b/service/frontend/clusterRedirectionHandler_test.go @@ -708,6 +708,7 @@ func (s *clusterRedirectionHandlerSuite) TestRespondQueryTaskCompleted() { token, err := s.handler.tokenSerializer.SerializeQueryTaskToken(&common.QueryTaskToken{ DomainID: s.domainID, }) + s.NoError(err) req := &types.RespondQueryTaskCompletedRequest{ TaskToken: token, } diff --git a/service/history/decision/handler.go b/service/history/decision/handler.go index e9c523cd7ea..31a6ff15636 100644 --- a/service/history/decision/handler.go +++ b/service/history/decision/handler.go @@ -596,7 +596,7 @@ Update_History_Loop: if decisionHeartbeatTimeout { // at this point, update is successful, but we still return an error to client so that the worker will give up this workflow return nil, &types.EntityNotExistsError{ - Message: fmt.Sprintf("decision heartbeat timeout"), + Message: "decision heartbeat timeout", } } diff --git a/service/history/decision/handler_test.go b/service/history/decision/handler_test.go index cb4e1efaf1d..7750422a8aa 100644 --- a/service/history/decision/handler_test.go +++ b/service/history/decision/handler_test.go @@ -124,7 +124,7 @@ func (s *DecisionHandlerSuite) constructQueryResults(ids []string, resultSize in for _, id := range ids { results[id] = &types.WorkflowQueryResult{ ResultType: types.QueryResultTypeAnswered.Ptr(), - Answer: make([]byte, resultSize, resultSize), + Answer: make([]byte, resultSize), } } return results diff --git a/service/history/events/notifier_test.go b/service/history/events/notifier_test.go index cf232279c5d..b4c4ff6bdc8 100644 --- a/service/history/events/notifier_test.go +++ b/service/history/events/notifier_test.go @@ -98,10 +98,8 @@ func (s *notifierSuite) TestSingleSubscriberWatchingEvents() { s.historyEventNotifier.NotifyNewHistoryEvent(historyEvent) }() - select { - case msg := <-channel: - 
s.Equal(historyEvent, msg) - } + msg := <-channel + s.Equal(historyEvent, msg) err = s.historyEventNotifier.UnwatchHistoryEvent(definition.NewWorkflowIdentifier(domainID, execution.GetWorkflowID(), execution.GetRunID()), subscriberID) s.Nil(err) diff --git a/service/history/execution/mutable_state_builder.go b/service/history/execution/mutable_state_builder.go index d25d1f18067..29d1ed22dd9 100644 --- a/service/history/execution/mutable_state_builder.go +++ b/service/history/execution/mutable_state_builder.go @@ -809,10 +809,7 @@ func (e *mutableStateBuilder) IsStickyTaskListEnabled() bool { return false } ttl := e.config.StickyTTL(e.GetDomainEntry().GetInfo().Name) - if e.timeSource.Now().After(e.executionInfo.LastUpdatedTimestamp.Add(ttl)) { - return false - } - return true + return !e.timeSource.Now().After(e.executionInfo.LastUpdatedTimestamp.Add(ttl)) } func (e *mutableStateBuilder) CreateNewHistoryEvent( @@ -1980,17 +1977,17 @@ func (e *mutableStateBuilder) addBinaryCheckSumIfNotExists( func (e *mutableStateBuilder) CheckResettable() error { if len(e.GetPendingChildExecutionInfos()) > 0 { return &types.BadRequestError{ - Message: fmt.Sprintf("it is not allowed resetting to a point that workflow has pending child types."), + Message: "it is not allowed resetting to a point that workflow has pending child types.", } } if len(e.GetPendingRequestCancelExternalInfos()) > 0 { return &types.BadRequestError{ - Message: fmt.Sprintf("it is not allowed resetting to a point that workflow has pending request cancel."), + Message: "it is not allowed resetting to a point that workflow has pending request cancel.", } } if len(e.GetPendingSignalExternalInfos()) > 0 { return &types.BadRequestError{ - Message: fmt.Sprintf("it is not allowed resetting to a point that workflow has pending signals to send."), + Message: "it is not allowed resetting to a point that workflow has pending signals to send.", } } return nil diff --git a/service/history/execution/state_rebuilder.go b/service/history/execution/state_rebuilder.go index 0c044ff412a..e0591440118 100644 --- a/service/history/execution/state_rebuilder.go +++ b/service/history/execution/state_rebuilder.go @@ -24,7 +24,6 @@ package execution import ( "context" - ctx "context" "fmt" "time" @@ -50,7 +49,7 @@ type ( // StateRebuilder is a mutable state builder to ndc state rebuild StateRebuilder interface { Rebuild( - ctx ctx.Context, + ctx context.Context, now time.Time, baseWorkflowIdentifier definition.WorkflowIdentifier, baseBranchToken []byte, @@ -101,7 +100,7 @@ func NewStateRebuilder( } func (r *stateRebuilderImpl) Rebuild( - ctx ctx.Context, + ctx context.Context, now time.Time, baseWorkflowIdentifier definition.WorkflowIdentifier, baseBranchToken []byte, diff --git a/service/history/failover/marker_notifier_test.go b/service/history/failover/marker_notifier_test.go index 70c3b6330e0..da9b3211e20 100644 --- a/service/history/failover/marker_notifier_test.go +++ b/service/history/failover/marker_notifier_test.go @@ -161,7 +161,6 @@ func (s *markerNotifierSuite) TestNotifyPendingFailoverMarker() { if count == 1 { close(s.markerNotifier.shutdownCh) } - return }, ) diff --git a/service/history/handler.go b/service/history/handler.go index e1671570bd7..4eb2a00f36e 100644 --- a/service/history/handler.go +++ b/service/history/handler.go @@ -2001,22 +2001,18 @@ func (h *handlerImpl) GetFailoverInfo( // HistoryEngine API calls to ShardOwnershipLost error return by HistoryService for client to be redirected to the // correct shard. 
func (h *handlerImpl) convertError(err error) error { - switch err.(type) { + switch err := err.(type) { case *persistence.ShardOwnershipLostError: - shardID := err.(*persistence.ShardOwnershipLostError).ShardID - info, err := h.GetMembershipResolver().Lookup(service.History, strconv.Itoa(shardID)) - if err == nil { + info, err2 := h.GetMembershipResolver().Lookup(service.History, strconv.Itoa(err.ShardID)) + if err2 == nil { return shard.CreateShardOwnershipLostError(h.GetHostInfo().GetAddress(), info.GetAddress()) } return shard.CreateShardOwnershipLostError(h.GetHostInfo().GetAddress(), "") case *persistence.WorkflowExecutionAlreadyStartedError: - err := err.(*persistence.WorkflowExecutionAlreadyStartedError) return &types.InternalServiceError{Message: err.Msg} case *persistence.CurrentWorkflowConditionFailedError: - err := err.(*persistence.CurrentWorkflowConditionFailedError) return &types.InternalServiceError{Message: err.Msg} case *persistence.TransactionSizeLimitError: - err := err.(*persistence.TransactionSizeLimitError) return &types.BadRequestError{Message: err.Msg} } diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index fac0f882c8e..17a52390068 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -1127,7 +1127,7 @@ func (e *historyEngineImpl) QueryWorkflow( queryFirstDecisionTaskWaitTime := defaultQueryFirstDecisionTaskWaitTime ctxDeadline, ok := ctx.Deadline() if ok { - ctxWaitTime := ctxDeadline.Sub(time.Now()) - time.Second + ctxWaitTime := time.Until(ctxDeadline) - time.Second if ctxWaitTime > queryFirstDecisionTaskWaitTime { queryFirstDecisionTaskWaitTime = ctxWaitTime } diff --git a/service/history/historyEngine2_test.go b/service/history/historyEngine2_test.go index d900fb54483..3c4cb6fed7e 100644 --- a/service/history/historyEngine2_test.go +++ b/service/history/historyEngine2_test.go @@ -848,6 +848,7 @@ func (s *engine2Suite) createExecutionStartedState(we types.WorkflowExecution, t } //nolint:unused +//lint:ignore U1000 for printing within tests func (s *engine2Suite) printHistory(builder execution.MutableState) string { return thrift.FromHistory(builder.GetHistoryBuilder().GetHistory()).String() } @@ -1490,9 +1491,10 @@ func (s *engine2Suite) TestNewChildContext() { ctx, cancel := context.WithTimeout(ctx, time.Hour) defer cancel() childCtx, childCancel = s.historyEngine.newChildContext(ctx) + defer childCancel() deadline, ok := childCtx.Deadline() s.True(ok) - s.True(deadline.Sub(time.Now()) < 10*time.Minute) + s.True(time.Until(deadline) < 10*time.Minute) } func (s *engine2Suite) getBuilder(domainID string, we types.WorkflowExecution) execution.MutableState { diff --git a/service/history/historyEngine_test.go b/service/history/historyEngine_test.go index ae30f8802d6..20c4a33dde7 100644 --- a/service/history/historyEngine_test.go +++ b/service/history/historyEngine_test.go @@ -48,7 +48,6 @@ import ( "github.com/uber/cadence/common/log/loggerimpl" "github.com/uber/cadence/common/mocks" "github.com/uber/cadence/common/persistence" - p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/types" "github.com/uber/cadence/common/types/mapper/thrift" "github.com/uber/cadence/service/history/config" @@ -289,8 +288,8 @@ func (s *engineSuite) TestGetMutableStateLongPoll() { RunID: workflowExecution.RunID, ScheduleID: 2, }) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - 
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() timer := time.NewTimer(delay) @@ -951,8 +950,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedUpdateExecutionFailed() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() s.mockShardManager.On("UpdateShard", mock.Anything, mock.Anything).Return(nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ @@ -1118,13 +1117,13 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedConflictOnUpdate() { gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, 
mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1219,8 +1218,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedMaxAttemptsExceeded() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() } @@ -1299,8 +1298,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedCompleteWorkflowFailed() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() } - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1388,8 +1387,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedFailWorkflowFailed() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() } - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1462,9 +1461,9 @@ func (s 
*engineSuite) TestRespondDecisionTaskCompletedBadDecisionAttributes() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse1, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{ - MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil, + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{ + MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil, ).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ @@ -1582,8 +1581,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedSingleActivityScheduledAtt s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() } - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1634,7 +1633,7 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedBadBinary() { executionContext := []byte("context") domainEntry := cache.NewLocalDomainCacheEntryForTest( &persistence.DomainInfo{ID: domainID, Name: constants.TestDomainName}, - &p.DomainConfig{ + &persistence.DomainConfig{ Retention: 2, BadBinaries: types.BadBinaries{ Binaries: map[string]*types.BadBinaryInfo{ @@ -1663,8 +1662,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedBadBinary() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse1, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, 
mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() s.mockDomainCache.EXPECT().GetDomainByID(domainID).Return(domainEntry, nil).AnyTimes() s.mockDomainCache.EXPECT().GetActiveDomainByID(domainID).Return(domainEntry, nil).AnyTimes() s.mockDomainCache.EXPECT().GetDomainName(domainID).Return(constants.TestDomainName, nil).AnyTimes() @@ -1732,8 +1731,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedSingleActivityScheduledDec gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1796,8 +1795,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompleted_DecisionHeartbeatTimeout( gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1844,8 +1843,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompleted_DecisionHeartbeatNotTimeo gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + 
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1892,8 +1891,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompleted_DecisionHeartbeatNotTimeo gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -1945,8 +1944,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedCompleteWorkflowSuccess() gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2005,8 +2004,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedFailWorkflowSuccess() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, 
mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2068,8 +2067,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedSignalExternalWorkflowSucc gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2129,8 +2128,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedStartChildWorkflowWithAban gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2198,8 +2197,8 @@ func (s *engineSuite) TestRespondDecisionTaskCompletedStartChildWorkflowWithTerm gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, 
mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2532,8 +2531,8 @@ func (s *engineSuite) TestRespondActivityTaskCompletedUpdateExecutionFailed() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() s.mockShardManager.On("UpdateShard", mock.Anything, mock.Anything).Return(nil).Once() err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &types.HistoryRespondActivityTaskCompletedRequest{ @@ -2695,12 +2694,12 @@ func (s *engineSuite) TestRespondActivityTaskCompletedConflictOnUpdate() { gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse1, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), 
&types.HistoryRespondActivityTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2762,8 +2761,8 @@ func (s *engineSuite) TestRespondActivityTaskCompletedMaxAttemptsExceeded() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() } err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &types.HistoryRespondActivityTaskCompletedRequest{ @@ -2814,8 +2813,8 @@ func (s *engineSuite) TestRespondActivityTaskCompletedSuccess() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), &types.HistoryRespondActivityTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -2879,8 +2878,8 @@ func (s *engineSuite) TestRespondActivityTaskCompletedByIdSuccess() { s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything, mock.Anything).Return(gceResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskCompleted(context.Background(), 
&types.HistoryRespondActivityTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -3101,8 +3100,8 @@ func (s *engineSuite) TestRespondActivityTaskFailedUpdateExecutionFailed() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, errors.New("FAILED")).Once() s.mockShardManager.On("UpdateShard", mock.Anything, mock.Anything).Return(nil).Once() err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &types.HistoryRespondActivityTaskFailedRequest{ @@ -3269,12 +3268,12 @@ func (s *engineSuite) TestRespondActivityTaskFailedConflictOnUpdate() { gwmsResponse2 := &persistence.GetWorkflowExecutionResponse{State: ms2} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse1, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &types.HistoryRespondActivityTaskFailedRequest{ DomainUUID: constants.TestDomainID, @@ -3336,8 +3335,8 @@ func (s *engineSuite) TestRespondActivityTaskFailedMaxAttemptsExceeded() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} 
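The ConflictOnUpdate tests above rely on testify consuming `.Once()` expectations for the same call in declaration order: the first UpdateWorkflowExecution attempt is set up to fail with a ConditionFailedError and the retried attempt to succeed. A minimal, self-contained sketch of that ordering behaviour, using an illustrative Store type rather than the real execution manager:

package example

import (
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Store is a stand-in for the execution manager mocked in the tests above.
type Store struct{ mock.Mock }

func (s *Store) UpdateWorkflowExecution(id string) error {
	return s.Called(id).Error(0)
}

func main() {
	store := &Store{}
	conflict := errors.New("condition failed")
	// Two .Once() expectations with the same arguments are consumed in the
	// order they were declared: the first call conflicts, the retry succeeds.
	store.On("UpdateWorkflowExecution", "wf-1").Return(conflict).Once()
	store.On("UpdateWorkflowExecution", "wf-1").Return(nil).Once()

	fmt.Println(store.UpdateWorkflowExecution("wf-1")) // condition failed
	fmt.Println(store.UpdateWorkflowExecution("wf-1")) // <nil>
}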
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, &persistence.ConditionFailedError{}).Once() } err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &types.HistoryRespondActivityTaskFailedRequest{ @@ -3388,8 +3387,8 @@ func (s *engineSuite) TestRespondActivityTaskFailedSuccess() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &types.HistoryRespondActivityTaskFailedRequest{ DomainUUID: constants.TestDomainID, @@ -3455,8 +3454,8 @@ func (s *engineSuite) TestRespondActivityTaskFailedByIDSuccess() { s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything, mock.Anything).Return(gceResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.RespondActivityTaskFailed(context.Background(), &types.HistoryRespondActivityTaskFailedRequest{ DomainUUID: constants.TestDomainID, @@ -3517,7 +3516,7 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_NoTimer() { ms := execution.CreatePersistenceMutableState(msBuilder) gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} 
s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() detais := []byte("details") @@ -3569,7 +3568,7 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_TimerRunning() { // HeartBeat timer running. s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() detais := []byte("details") @@ -3626,7 +3625,7 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatByIDSuccess() { ms := execution.CreatePersistenceMutableState(msBuilder) gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() detais := []byte("details") @@ -3727,8 +3726,8 @@ func (s *engineSuite) TestRespondActivityTaskCanceled_Started() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &types.HistoryRespondActivityTaskCanceledRequest{ DomainUUID: constants.TestDomainID, @@ -3792,8 +3791,8 @@ func (s *engineSuite) TestRespondActivityTaskCanceledByID_Started() { s.mockExecutionMgr.On("GetCurrentExecution", mock.Anything, mock.Anything).Return(gceResponse, nil).Once() s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, 
mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &types.HistoryRespondActivityTaskCanceledRequest{ DomainUUID: constants.TestDomainID, @@ -3937,8 +3936,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_NotSchedule gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4001,8 +4000,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Scheduled() }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4071,8 +4070,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Started() { }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, 
mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4144,8 +4143,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Completed() ms := execution.CreatePersistenceMutableState(msBuilder) gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4210,8 +4209,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_NoHeartBeat }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4231,7 +4230,7 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_NoHeartBeat s.False(executionBuilder.HasPendingDecision()) // Try recording activity heartbeat - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: 
&persistence.MutableStateUpdateSessionStats{}}, nil).Once() activityTaskToken, _ := json.Marshal(&common.TaskToken{ WorkflowID: "wId", @@ -4252,8 +4251,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_NoHeartBeat s.True(hbResponse.CancelRequested) // Try cancelling the request. - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &types.HistoryRespondActivityTaskCanceledRequest{ DomainUUID: constants.TestDomainID, @@ -4317,8 +4316,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Success() { }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4338,7 +4337,7 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Success() { s.False(executionBuilder.HasPendingDecision()) // Try recording activity heartbeat - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() activityTaskToken, _ := json.Marshal(&common.TaskToken{ WorkflowID: "wId", @@ -4359,8 +4358,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_Success() { s.True(hbResponse.CancelRequested) // Try cancelling the request. 
- s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &types.HistoryRespondActivityTaskCanceledRequest{ DomainUUID: constants.TestDomainID, @@ -4423,8 +4422,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_SuccessWith }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() // load mutable state such that it already exists in memory when respond decision task is called // this enables us to set query registry on it @@ -4485,7 +4484,7 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_SuccessWith s.Equal(query.TerminationTypeUnblocked, unblocked1.TerminationType) // Try recording activity heartbeat - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() activityTaskToken, _ := json.Marshal(&common.TaskToken{ WorkflowID: "wId", @@ -4506,8 +4505,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_SuccessWith s.True(hbResponse.CancelRequested) // Try cancelling the request. 
- s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RespondActivityTaskCanceled(context.Background(), &types.HistoryRespondActivityTaskCanceledRequest{ DomainUUID: constants.TestDomainID, @@ -4570,8 +4569,8 @@ func (s *engineSuite) TestRequestCancel_RespondDecisionTaskCompleted_SuccessWith }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() // load mutable state such that it already exists in memory when respond decision task is called // this enables us to set query registry on it @@ -4655,8 +4654,8 @@ func (s *engineSuite) TestStarTimer_DuplicateTimerID() { }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4686,14 +4685,14 @@ func (s *engineSuite) TestStarTimer_DuplicateTimerID() { decisionFailedEvent := false s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse2, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Run(func(arguments mock.Arguments) { + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Run(func(arguments 
mock.Arguments) { req := arguments.Get(1).(*persistence.AppendHistoryNodesRequest) decTaskIndex := len(req.Events) - 1 if decTaskIndex >= 0 && *req.Events[decTaskIndex].EventType == types.EventTypeDecisionTaskFailed { decisionFailedEvent = true } }).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err = s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4759,8 +4758,8 @@ func (s *engineSuite) TestUserTimer_RespondDecisionTaskCompleted() { }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4817,8 +4816,8 @@ func (s *engineSuite) TestCancelTimer_RespondDecisionTaskCompleted_NoStartTimer( }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err := s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4884,12 +4883,12 @@ func (s *engineSuite) TestCancelTimer_RespondDecisionTaskCompleted_TimerFired() }} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() 
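The TestStarTimer_DuplicateTimerID hunk above keeps the existing `Run` callback that inspects the appended history nodes; only the response types move from the `p` alias to the full `persistence` package. For readers unfamiliar with the pattern, a small self-contained sketch of capturing call arguments with testify's `Run` (the HistoryStore type below is illustrative, not Cadence code):

package example

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// HistoryStore is a stand-in for the history manager mocked in the tests above.
type HistoryStore struct{ mock.Mock }

func (h *HistoryStore) AppendHistoryNodes(events []string) error {
	return h.Called(events).Error(0)
}

func main() {
	store := &HistoryStore{}
	var seen []string
	// Run lets the expectation observe the arguments each matched call received,
	// which is how the test above checks for a DecisionTaskFailed event.
	store.On("AppendHistoryNodes", mock.Anything).Run(func(args mock.Arguments) {
		seen = args.Get(0).([]string)
	}).Return(nil).Once()

	_ = store.AppendHistoryNodes([]string{"DecisionTaskFailed"})
	fmt.Println(seen) // [DecisionTaskFailed]
}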
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.MatchedBy(func(input *persistence.UpdateWorkflowExecutionRequest) bool { // need to check whether the buffered events are cleared s.True(input.UpdateWorkflowMutation.ClearBufferedEvents) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() _, err = s.mockHistoryEngine.RespondDecisionTaskCompleted(context.Background(), &types.HistoryRespondDecisionTaskCompletedRequest{ DomainUUID: constants.TestDomainID, @@ -4949,8 +4948,8 @@ func (s *engineSuite) TestSignalWorkflowExecution() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err := s.mockHistoryEngine.SignalWorkflowExecution(context.Background(), signalRequest) s.Nil(err) @@ -5113,7 +5112,7 @@ func (s *engineSuite) TestRemoveSignalMutableState() { gwmsResponse := &persistence.GetWorkflowExecutionResponse{State: ms} s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(gwmsResponse, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() err = s.mockHistoryEngine.RemoveSignalMutableState(context.Background(), removeRequest) s.Nil(err) diff --git a/service/history/ndc/branch_manager_test.go b/service/history/ndc/branch_manager_test.go index f8e0791df0f..d3873c58be2 100644 --- a/service/history/ndc/branch_manager_test.go +++ b/service/history/ndc/branch_manager_test.go @@ -212,6 +212,7 @@ func (s *branchManagerSuite) TestFlushBufferedEvents() { ctx := ctx.Background() _, _, err = s.branchManager.flushBufferedEvents(ctx, incomingVersionHistory) + s.NoError(err) } func (s *branchManagerSuite) TestPrepareVersionHistory_BranchAppendable_NoMissingEventInBetween() { diff --git a/service/history/ndc/replication_task.go b/service/history/ndc/replication_task.go index 103422d572d..de94a85cb76 100644 --- a/service/history/ndc/replication_task.go +++ b/service/history/ndc/replication_task.go @@ -354,10 +354,7 @@ func validateReplicateEventsRequest( } func validateUUID(input string) bool { - if uuid.Parse(input) == nil { - return false - } - return true + return uuid.Parse(input) != nil } func validateEvents(events 
[]*types.HistoryEvent) (int64, error) { diff --git a/service/history/ndc/workflow_resetter.go b/service/history/ndc/workflow_resetter.go index a9e84fbe579..514beca2f92 100644 --- a/service/history/ndc/workflow_resetter.go +++ b/service/history/ndc/workflow_resetter.go @@ -120,6 +120,9 @@ func (r *workflowResetterImpl) ResetWorkflow( } resetBranchToken, err := r.getResetBranchToken(ctx, baseBranchToken, baseLastEventID) + if err != nil { + return nil, err + } requestID := uuid.New() rebuildMutableState, rebuiltHistorySize, err := r.stateRebuilder.Rebuild( diff --git a/service/history/query/registry.go b/service/history/query/registry.go index 1cf3d582202..6c08391a71a 100644 --- a/service/history/query/registry.go +++ b/service/history/query/registry.go @@ -208,7 +208,7 @@ func (r *registryImpl) getQueryNoLock(id string) (query, error) { } func (r *registryImpl) getIDs(m map[string]query) []string { - result := make([]string, len(m), len(m)) + result := make([]string, len(m)) index := 0 for id := range m { result[index] = id diff --git a/service/history/query/registry_test.go b/service/history/query/registry_test.go index cfae5d48057..72f615965d5 100644 --- a/service/history/query/registry_test.go +++ b/service/history/query/registry_test.go @@ -47,8 +47,8 @@ func (s *QueryRegistrySuite) SetupTest() { func (s *QueryRegistrySuite) TestQueryRegistry() { qr := NewRegistry() - ids := make([]string, 100, 100) - termChans := make([]<-chan struct{}, 100, 100) + ids := make([]string, 100) + termChans := make([]<-chan struct{}, 100) for i := 0; i < 100; i++ { ids[i], termChans[i] = qr.BufferQuery(&types.WorkflowQuery{}) } diff --git a/service/history/queue/processing_queue.go b/service/history/queue/processing_queue.go index 3b6461619b2..8bf8d69f36f 100644 --- a/service/history/queue/processing_queue.go +++ b/service/history/queue/processing_queue.go @@ -268,7 +268,7 @@ func (q *processingQueueImpl) AddTasks( if !taskBelongsToProcessQueue(q.state, key, task) { errMsg := "Processing queue encountered a task doesn't belong to its scope" q.logger.Error(errMsg, tag.Error( - fmt.Errorf("Processing queue state: %+v, task: %+v", q.state, key), + fmt.Errorf("processing queue state: %+v, task: %+v", q.state, key), )) panic(errMsg) } diff --git a/service/history/queue/timer_gate_test.go b/service/history/queue/timer_gate_test.go index 02c01a5461d..03a2e85ac9b 100644 --- a/service/history/queue/timer_gate_test.go +++ b/service/history/queue/timer_gate_test.go @@ -174,9 +174,9 @@ func (s *localTimerGateSuite) TestTimerFireAfterUpdate_NotActive_Updated() { deadline := now.Add(3 * time.Second) s.localTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.localTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.localTimerGate.FireChan() + // test setup up complete s.True(s.localTimerGate.Update(updatedNewTimer)) @@ -194,9 +194,9 @@ func (s *localTimerGateSuite) TestTimerFireAfterUpdate_NotActive_NotUpdated() { deadline := now.Add(1 * time.Second) s.localTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.localTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.localTimerGate.FireChan() + // test setup up complete s.True(s.localTimerGate.Update(updatedNewTimer)) @@ -346,9 +346,9 @@ func (s *remoteTimerGateSuite) TestTimerFireAfterUpdate_NotActive_Updated() { deadline := now.Add(2 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + 
// this is to drain existing signal + <-s.remoteTimerGate.FireChan() + // test setup up complete s.True(s.remoteTimerGate.Update(updatedNewTimer)) @@ -372,9 +372,9 @@ func (s *remoteTimerGateSuite) TestTimerFireAfterUpdate_NotActive_NotUpdated() { updatedNewTimer := now.Add(-1 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.remoteTimerGate.FireChan() + // test setup up complete s.True(s.remoteTimerGate.Update(updatedNewTimer)) @@ -402,9 +402,9 @@ func (s *remoteTimerGateSuite) TestTimerSetCurrentTime_Update_TimerAlreadyFired( newCurrentTime := now.Add(1 * time.Second) s.remoteTimerGate.Update(newTimer) - select { // this is to drain existing signal - case <-s.remoteTimerGate.FireChan(): - } + // this is to drain existing signal + <-s.remoteTimerGate.FireChan() + // test setup up complete s.True(s.remoteTimerGate.SetCurrentTime(newCurrentTime)) diff --git a/service/history/replication/dlq_handler.go b/service/history/replication/dlq_handler.go index b13d5cb88ca..159fb26656e 100644 --- a/service/history/replication/dlq_handler.go +++ b/service/history/replication/dlq_handler.go @@ -204,6 +204,9 @@ func (r *dlqHandlerImpl) MergeMessages( pageSize, pageToken, ) + if err != nil { + return nil, err + } lastMessageID = defaultBeginningMessageID for _, task := range tasks { diff --git a/service/history/replication/task_ack_manager.go b/service/history/replication/task_ack_manager.go index f010ec24ced..bdf5c1621d8 100644 --- a/service/history/replication/task_ack_manager.go +++ b/service/history/replication/task_ack_manager.go @@ -26,7 +26,6 @@ package replication import ( "context" - ctx "context" "errors" "strconv" "sync/atomic" @@ -60,12 +59,12 @@ type ( // TaskAckManager is the ack manager for replication tasks TaskAckManager interface { GetTask( - ctx ctx.Context, + ctx context.Context, taskInfo *types.ReplicationTaskInfo, ) (*types.ReplicationTask, error) GetTasks( - ctx ctx.Context, + ctx context.Context, pollingCluster string, lastReadTaskID int64, ) (*types.ReplicationMessages, error) @@ -126,7 +125,7 @@ func NewTaskAckManager( } func (t *taskAckManagerImpl) GetTask( - ctx ctx.Context, + ctx context.Context, taskInfo *types.ReplicationTaskInfo, ) (*types.ReplicationTask, error) { task := &persistence.ReplicationTaskInfo{ @@ -144,7 +143,7 @@ func (t *taskAckManagerImpl) GetTask( } func (t *taskAckManagerImpl) GetTasks( - ctx ctx.Context, + ctx context.Context, pollingCluster string, lastReadTaskID int64, ) (*types.ReplicationMessages, error) { @@ -243,7 +242,7 @@ TaskInfoLoop: } func (t *taskAckManagerImpl) toReplicationTask( - ctx ctx.Context, + ctx context.Context, taskInfo task.Info, ) (*types.ReplicationTask, error) { @@ -277,7 +276,7 @@ func (t *taskAckManagerImpl) toReplicationTask( } func (t *taskAckManagerImpl) processReplication( - ctx ctx.Context, + ctx context.Context, processTaskIfClosed bool, taskInfo *persistence.ReplicationTaskInfo, action func( @@ -370,7 +369,7 @@ func (t *taskAckManagerImpl) getEventsBlob( } func (t *taskAckManagerImpl) isNewRunNDCEnabled( - ctx ctx.Context, + ctx context.Context, domainID string, workflowID string, runID string, @@ -507,7 +506,7 @@ func (t *taskAckManagerImpl) generateFailoverMarkerTask( } func (t *taskAckManagerImpl) generateSyncActivityTask( - ctx ctx.Context, + ctx context.Context, taskInfo *persistence.ReplicationTaskInfo, ) (*types.ReplicationTask, error) { @@ -568,7 +567,7 @@ func (t 
*taskAckManagerImpl) generateSyncActivityTask( } func (t *taskAckManagerImpl) generateHistoryReplicationTask( - ctx ctx.Context, + ctx context.Context, task *persistence.ReplicationTaskInfo, ) (*types.ReplicationTask, error) { diff --git a/service/history/replication/task_processor.go b/service/history/replication/task_processor.go index fd1b426af4d..505a3c59117 100644 --- a/service/history/replication/task_processor.go +++ b/service/history/replication/task_processor.go @@ -325,7 +325,7 @@ func (p *taskProcessorImpl) processResponse(response *types.ReplicationMessages) backoffDuration := p.noTaskRetrier.NextBackOff() time.Sleep(backoffDuration) } else { - scope.RecordTimer(metrics.ReplicationTasksAppliedLatency, time.Now().Sub(batchRequestStartTime)) + scope.RecordTimer(metrics.ReplicationTasksAppliedLatency, time.Since(batchRequestStartTime)) } p.lastProcessedMessageID = response.GetLastRetrievedMessageID() @@ -563,7 +563,7 @@ func (p *taskProcessorImpl) triggerDataInconsistencyScan(replicationTask *types. case replicationTask.GetHistoryTaskV2Attributes() != nil: attr := replicationTask.GetHistoryTaskV2Attributes() versionHistoryItems := attr.GetVersionHistoryItems() - if versionHistoryItems == nil || len(versionHistoryItems) == 0 { + if len(versionHistoryItems) == 0 { return errors.New("failed to trigger data scan due to invalid version history") } // version history items in same batch should be the same diff --git a/service/history/reset/resetter.go b/service/history/reset/resetter.go index 73101593267..2f02addab91 100644 --- a/service/history/reset/resetter.go +++ b/service/history/reset/resetter.go @@ -24,7 +24,6 @@ package reset import ( "context" - "fmt" "github.com/uber/cadence/common" "github.com/uber/cadence/common/cache" @@ -634,7 +633,7 @@ func (r *workflowResetterImpl) closePendingDecisionTask( if len(resetMutableState.GetPendingChildExecutionInfos()) > 0 { return nil, &types.BadRequestError{ - Message: fmt.Sprintf("Can not reset workflow with pending child workflows"), + Message: "Can not reset workflow with pending child workflows", } } diff --git a/service/history/service.go b/service/history/service.go index 36b2b60d0c1..9bbeb648675 100644 --- a/service/history/service.go +++ b/service/history/service.go @@ -123,7 +123,6 @@ func (s *Service) Stop() { // 7. 
force stop the whole world and return const gossipPropagationDelay = 400 * time.Millisecond - const shardOwnershipTransferDelay = 5 * time.Second const gracePeriod = 2 * time.Second remainingTime := s.config.ShutdownDrainDuration() @@ -135,7 +134,7 @@ func (s *Service) Stop() { remainingTime = common.SleepWithMinDuration(gossipPropagationDelay, remainingTime) remainingTime = s.handler.PrepareToStop(remainingTime) - remainingTime = common.SleepWithMinDuration(gracePeriod, remainingTime) + _ = common.SleepWithMinDuration(gracePeriod, remainingTime) close(s.stopC) diff --git a/service/history/shard/context.go b/service/history/shard/context.go index 8e273da85cf..9318765b69e 100644 --- a/service/history/shard/context.go +++ b/service/history/shard/context.go @@ -313,7 +313,7 @@ func (s *contextImpl) UpdateTransferProcessingQueueStates(cluster string, states defer s.Unlock() if len(states) == 0 { - return errors.New("Empty transfer processing queue states") + return errors.New("empty transfer processing queue states") } if s.shardInfo.TransferProcessingQueueStates.StatesByCluster == nil { @@ -360,7 +360,7 @@ func (s *contextImpl) UpdateCrossClusterProcessingQueueStates(cluster string, st defer s.Unlock() if len(states) == 0 { - return errors.New("Empty cross-cluster processing queue states") + return errors.New("empty cross-cluster processing queue states") } if s.shardInfo.CrossClusterProcessingQueueStates.StatesByCluster == nil { @@ -469,7 +469,7 @@ func (s *contextImpl) UpdateTimerProcessingQueueStates(cluster string, states [] defer s.Unlock() if len(states) == 0 { - return errors.New("Empty transfer processing queue states") + return errors.New("empty transfer processing queue states") } if s.shardInfo.TimerProcessingQueueStates.StatesByCluster == nil { diff --git a/service/history/shard/controller.go b/service/history/shard/controller.go index 4bcc0307837..ef7089b1b33 100644 --- a/service/history/shard/controller.go +++ b/service/history/shard/controller.go @@ -326,12 +326,12 @@ func (c *controller) removeHistoryShardItem(shardID int, shardItem *historyShard currentShardItem, ok := c.historyShards[shardID] if !ok { - return nil, fmt.Errorf("No item found to remove for shard: %v", shardID) + return nil, fmt.Errorf("no item found to remove for shard: %v", shardID) } if shardItem != nil && currentShardItem != shardItem { // the shardItem comparison is a defensive check to make sure we are deleting // what we intend to delete. 
- return nil, fmt.Errorf("Current shardItem doesn't match the one we intend to delete for shard: %v", shardID) + return nil, fmt.Errorf("current shardItem doesn't match the one we intend to delete for shard: %v", shardID) } delete(c.historyShards, shardID) diff --git a/service/history/task/cross_cluster_source_task_executor_test.go b/service/history/task/cross_cluster_source_task_executor_test.go index bdedd11993b..538ada426e7 100644 --- a/service/history/task/cross_cluster_source_task_executor_test.go +++ b/service/history/task/cross_cluster_source_task_executor_test.go @@ -1117,7 +1117,7 @@ func (s *crossClusterSourceTaskExecutorSuite) TestExecuteStartChildExecution_Ini crossClusterTask Task, childInfo *p.ChildExecutionInfo, ) { - lastEvent = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), targetExecution.GetRunID(), childInfo.WorkflowTypeName) + _ = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), targetExecution.GetRunID(), childInfo.WorkflowTypeName) di := test.AddDecisionTaskScheduledEvent(mutableState) lastEvent = test.AddDecisionTaskStartedEvent(mutableState, di.ScheduleID, mutableState.GetExecutionInfo().TaskList, "some random identity") lastEvent = test.AddDecisionTaskCompletedEvent(mutableState, di.ScheduleID, lastEvent.GetEventID(), nil, "some random identity") diff --git a/service/history/task/cross_cluster_target_task_executor.go b/service/history/task/cross_cluster_target_task_executor.go index 9b97ea99052..67411253a75 100644 --- a/service/history/task/cross_cluster_target_task_executor.go +++ b/service/history/task/cross_cluster_target_task_executor.go @@ -37,7 +37,7 @@ import ( ) var ( - errUnknownCrossClusterTask = errors.New("Unknown cross cluster task") + errUnknownCrossClusterTask = errors.New("unknown cross cluster task") errUnknownTaskProcessingState = errors.New("unknown cross cluster task processing state") errMissingTaskRequestAttributes = errors.New("request attributes not specified") errDomainNotExists = errors.New("domain not exists") diff --git a/service/history/task/cross_cluster_task_test.go b/service/history/task/cross_cluster_task_test.go index 8148af31ff8..984956e18b2 100644 --- a/service/history/task/cross_cluster_task_test.go +++ b/service/history/task/cross_cluster_task_test.go @@ -37,12 +37,10 @@ import ( "github.com/uber/cadence/common/dynamicconfig" "github.com/uber/cadence/common/mocks" "github.com/uber/cadence/common/persistence" - p "github.com/uber/cadence/common/persistence" ctask "github.com/uber/cadence/common/task" "github.com/uber/cadence/common/types" "github.com/uber/cadence/service/history/config" "github.com/uber/cadence/service/history/constants" - "github.com/uber/cadence/service/history/engine" "github.com/uber/cadence/service/history/events" "github.com/uber/cadence/service/history/execution" "github.com/uber/cadence/service/history/shard" @@ -56,7 +54,6 @@ type ( controller *gomock.Controller mockShard *shard.TestContext - mockEngine *engine.MockEngine mockDomainCache *cache.MockDomainCache mockClusterMetadata *cluster.MockMetadata mockExecutionMgr *mocks.ExecutionManager @@ -79,7 +76,7 @@ func (s *crossClusterTaskSuite) SetupTest() { config := config.NewForTest() s.mockShard = shard.NewTestContext( s.controller, - &p.ShardInfo{ + &persistence.ShardInfo{ ShardID: 0, RangeID: 1, }, @@ -122,7 +119,7 @@ func (s *crossClusterTaskSuite) SetupTest() 
{ func (s *crossClusterTaskSuite) TestSourceTask_Execute() { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, }, ) @@ -137,7 +134,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_Execute() { func (s *crossClusterTaskSuite) TestSourceTask_Ack() { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, }, ) @@ -168,7 +165,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_Ack() { func (s *crossClusterTaskSuite) TestSourceTask_Nack() { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, }, ) @@ -234,7 +231,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_HandleError() { for _, tc := range testCases { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, }, ) @@ -283,7 +280,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_IsReadyForPoll() { for _, tc := range testCases { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, }, ) @@ -329,7 +326,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_IsValid() { for _, tc := range testCases { sourceTask := s.newTestSourceTask( tc.targetCluster, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: tc.sourceDomainID, TargetDomainID: tc.targetDomainID, }, @@ -447,7 +444,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_RecordResponse() { for _, tc := range testCases { sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ DomainID: constants.TestDomainID, TaskType: tc.taskType, }, @@ -477,7 +474,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_Invalidated( request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, initiatedEvent *types.HistoryEvent, ) { s.Error(getRequestErr) @@ -498,7 +495,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, ) { // child already started, advance processing to recorded lastEvent = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), childExecutionRunID, childInfo.WorkflowTypeName) @@ -512,7 +509,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, initiatedEvent *types.HistoryEvent, ) { s.NoError(getRequestErr) @@ -526,7 +523,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart TaskID: sourceTask.GetTaskID(), VisibilityTimestamp: common.Int64Ptr(sourceTask.GetVisibilityTimestamp().UnixNano()), }, request.TaskInfo) - taskInfo := 
sourceTask.GetInfo().(*p.CrossClusterTaskInfo) + taskInfo := sourceTask.GetInfo().(*persistence.CrossClusterTaskInfo) s.Equal(&types.CrossClusterStartChildExecutionRequestAttributes{ TargetDomainID: taskInfo.TargetDomainID, RequestID: childInfo.CreateRequestID, @@ -549,10 +546,10 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, ) { // child already started, advance processing to recorded - lastEvent = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), childExecutionRunID, childInfo.WorkflowTypeName) + _ = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), childExecutionRunID, childInfo.WorkflowTypeName) di := test.AddDecisionTaskScheduledEvent(mutableState) lastEvent = test.AddDecisionTaskStartedEvent(mutableState, di.ScheduleID, mutableState.GetExecutionInfo().TaskList, "some random identity") lastEvent = test.AddDecisionTaskCompletedEvent(mutableState, di.ScheduleID, lastEvent.GetEventID(), nil, "some random identity") @@ -567,7 +564,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, initiatedEvent *types.HistoryEvent, ) { s.NoError(getRequestErr) @@ -581,7 +578,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_AlreadyStart TaskID: sourceTask.GetTaskID(), VisibilityTimestamp: common.Int64Ptr(sourceTask.GetVisibilityTimestamp().UnixNano()), }, request.TaskInfo) - taskInfo := sourceTask.GetInfo().(*p.CrossClusterTaskInfo) + taskInfo := sourceTask.GetInfo().(*persistence.CrossClusterTaskInfo) s.Equal(&types.CrossClusterStartChildExecutionRequestAttributes{ TargetDomainID: taskInfo.TargetDomainID, RequestID: childInfo.CreateRequestID, @@ -603,7 +600,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_Duplication( workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, ) { // complete the child workflow, task should be no-op lastEvent = test.AddChildWorkflowExecutionStartedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), targetExecution.GetRunID(), childInfo.WorkflowTypeName) @@ -622,7 +619,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetStartChildRequest_Duplication( request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, initiatedEvent *types.HistoryEvent, ) { s.NoError(getRequestErr) @@ -639,13 +636,13 @@ func (s *crossClusterTaskSuite) testGetStartChildExecutionRequest( workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, ), validationFn func( request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask 
*crossClusterSourceTask, - childInfo *p.ChildExecutionInfo, + childInfo *persistence.ChildExecutionInfo, initiatedEvent *types.HistoryEvent, ), ) { @@ -677,7 +674,7 @@ func (s *crossClusterTaskSuite) testGetStartChildExecutionRequest( sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ Version: mutableState.GetCurrentVersion(), DomainID: sourceDomainID, WorkflowID: workflowExecution.GetWorkflowID(), @@ -686,7 +683,7 @@ func (s *crossClusterTaskSuite) testGetStartChildExecutionRequest( TargetWorkflowID: targetExecution.GetWorkflowID(), TaskID: int64(59), TaskList: mutableState.GetExecutionInfo().TaskList, - TaskType: p.CrossClusterTaskTypeStartChildExecution, + TaskType: persistence.CrossClusterTaskTypeStartChildExecution, ScheduleID: event.GetEventID(), }, ) @@ -707,7 +704,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetCancelRequest_Duplication() { workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ) { lastEvent = test.AddCancelRequestedEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), targetExecution.GetRunID()) mutableState.FlushBufferedEvents() @@ -720,7 +717,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetCancelRequest_Duplication() { request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ) { s.NoError(getRequestErr) s.Nil(request) @@ -737,7 +734,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetCancelRequest_Success() { workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ) { persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, lastEvent.GetEventID(), lastEvent.GetVersion()) s.NoError(err) @@ -747,7 +744,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetCancelRequest_Success() { request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ) { s.NoError(getRequestErr) s.NotNil(request) @@ -760,7 +757,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetCancelRequest_Success() { TaskID: sourceTask.GetTaskID(), VisibilityTimestamp: common.Int64Ptr(sourceTask.GetVisibilityTimestamp().UnixNano()), }, request.TaskInfo) - taskInfo := sourceTask.GetInfo().(*p.CrossClusterTaskInfo) + taskInfo := sourceTask.GetInfo().(*persistence.CrossClusterTaskInfo) s.Equal(&types.CrossClusterCancelExecutionRequestAttributes{ TargetDomainID: taskInfo.TargetDomainID, TargetWorkflowID: taskInfo.TargetWorkflowID, @@ -782,13 +779,13 @@ func (s *crossClusterTaskSuite) testGetCancelExecutionRequest( workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ), validationFn func( request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - cancelInfo *p.RequestCancelInfo, + cancelInfo *persistence.RequestCancelInfo, ), ) { workflowExecution, mutableState, decisionCompletionID, err := 
test.SetupWorkflowWithCompletedDecision(s.mockShard, sourceDomainID) @@ -809,7 +806,7 @@ func (s *crossClusterTaskSuite) testGetCancelExecutionRequest( sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ Version: mutableState.GetCurrentVersion(), DomainID: sourceDomainID, WorkflowID: workflowExecution.GetWorkflowID(), @@ -819,7 +816,7 @@ func (s *crossClusterTaskSuite) testGetCancelExecutionRequest( TargetRunID: targetExecution.GetRunID(), TaskID: int64(59), TaskList: mutableState.GetExecutionInfo().TaskList, - TaskType: p.CrossClusterTaskTypeCancelExecution, + TaskType: persistence.CrossClusterTaskTypeCancelExecution, ScheduleID: event.GetEventID(), }, ) @@ -840,7 +837,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Duplication() { workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ) { lastEvent = test.AddSignaledEvent(mutableState, lastEvent.GetEventID(), constants.TestTargetDomainID, targetExecution.GetWorkflowID(), targetExecution.GetRunID(), signalInfo.Control) mutableState.FlushBufferedEvents() @@ -853,7 +850,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Duplication() { request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ) { s.NoError(getRequestErr) s.Nil(request) @@ -870,7 +867,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Success() { workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ) { persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, lastEvent.GetEventID(), lastEvent.GetVersion()) s.NoError(err) @@ -880,7 +877,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Success() { request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ) { s.NoError(getRequestErr) s.NotNil(request) @@ -893,7 +890,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Success() { TaskID: sourceTask.GetTaskID(), VisibilityTimestamp: common.Int64Ptr(sourceTask.GetVisibilityTimestamp().UnixNano()), }, request.TaskInfo) - taskInfo := sourceTask.GetInfo().(*p.CrossClusterTaskInfo) + taskInfo := sourceTask.GetInfo().(*persistence.CrossClusterTaskInfo) s.Equal(&types.CrossClusterSignalExecutionRequestAttributes{ TargetDomainID: taskInfo.TargetDomainID, TargetWorkflowID: taskInfo.TargetWorkflowID, @@ -919,7 +916,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Failure() { workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ) { sourceTask.processingState = processingStateResponseRecorded s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(nil, errors.New("some random error")) @@ -928,7 +925,7 @@ func (s *crossClusterTaskSuite) TestSourceTask_GetSignalRequest_Failure() { request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo 
*persistence.SignalInfo, ) { s.Error(getRequestErr) s.Nil(request) @@ -945,13 +942,13 @@ func (s *crossClusterTaskSuite) testGetSignalExecutionRequest( workflowExecution, targetExecution types.WorkflowExecution, lastEvent *types.HistoryEvent, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ), validationFn func( request *types.CrossClusterTaskRequest, getRequestErr error, sourceTask *crossClusterSourceTask, - signalInfo *p.SignalInfo, + signalInfo *persistence.SignalInfo, ), ) { workflowExecution, mutableState, decisionCompletionID, err := test.SetupWorkflowWithCompletedDecision(s.mockShard, sourceDomainID) @@ -975,7 +972,7 @@ func (s *crossClusterTaskSuite) testGetSignalExecutionRequest( sourceTask := s.newTestSourceTask( cluster.TestAlternativeClusterName, - &p.CrossClusterTaskInfo{ + &persistence.CrossClusterTaskInfo{ Version: mutableState.GetCurrentVersion(), DomainID: sourceDomainID, WorkflowID: workflowExecution.GetWorkflowID(), @@ -985,7 +982,7 @@ func (s *crossClusterTaskSuite) testGetSignalExecutionRequest( TargetRunID: targetExecution.GetRunID(), TaskID: int64(59), TaskList: mutableState.GetExecutionInfo().TaskList, - TaskType: p.CrossClusterTaskTypeSignalExecution, + TaskType: persistence.CrossClusterTaskTypeSignalExecution, ScheduleID: event.GetEventID(), }, ) @@ -1000,7 +997,7 @@ func (s *crossClusterTaskSuite) testGetSignalExecutionRequest( func (s *crossClusterTaskSuite) newTestSourceTask( targetCluster string, - taskInfo *p.CrossClusterTaskInfo, + taskInfo *persistence.CrossClusterTaskInfo, ) *crossClusterSourceTask { return NewCrossClusterSourceTask( s.mockShard, diff --git a/service/history/task/standby_task_util.go b/service/history/task/standby_task_util.go index a757a102323..e0b400b5cf5 100644 --- a/service/history/task/standby_task_util.go +++ b/service/history/task/standby_task_util.go @@ -105,10 +105,6 @@ func standbyTimerTaskPostActionTaskDiscarded( type ( historyResendInfo struct { - // used by 2DC, since 2DC only has one branch - // TODO deprecate this nextEventID - nextEventID *int64 - // used by NDC lastEventID *int64 lastEventVersion *int64 diff --git a/service/history/task/timer_active_task_executor_test.go b/service/history/task/timer_active_task_executor_test.go index c97a102f52b..94d2cb66c97 100644 --- a/service/history/task/timer_active_task_executor_test.go +++ b/service/history/task/timer_active_task_executor_test.go @@ -797,6 +797,7 @@ func (s *timerActiveTaskExecutorSuite) TestDecisionScheduleToStartTimeout_Normal }) persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, di.ScheduleID, di.Version) + s.NoError(err) s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.MatchedBy(func(req *persistence.UpdateWorkflowExecutionRequest) bool { @@ -834,6 +835,7 @@ func (s *timerActiveTaskExecutorSuite) TestDecisionScheduleToStartTimeout_Transi }) persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, di.ScheduleID, di.Version) + s.NoError(err) s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once() 
s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.MatchedBy(func(req *persistence.UpdateWorkflowExecutionRequest) bool { return req.UpdateWorkflowMutation.ExecutionInfo.DecisionAttempt == 2 && @@ -872,6 +874,7 @@ func (s *timerActiveTaskExecutorSuite) TestDecisionScheduleToStartTimeout_Sticky }) persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, di.ScheduleID, di.Version) + s.NoError(err) s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.MatchedBy(func(req *persistence.UpdateWorkflowExecutionRequest) bool { @@ -909,6 +912,7 @@ func (s *timerActiveTaskExecutorSuite) TestDecisionStartToCloseTimeout_Fire() { }) persistenceMutableState, err := test.CreatePersistenceMutableState(mutableState, startedEvent.GetEventID(), startedEvent.GetVersion()) + s.NoError(err) s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil).Once() s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() diff --git a/service/history/task/transfer_active_task_executor.go b/service/history/task/transfer_active_task_executor.go index 0d48df7a2a3..42c57dffc80 100644 --- a/service/history/task/transfer_active_task_executor.go +++ b/service/history/task/transfer_active_task_executor.go @@ -59,8 +59,8 @@ var ( ) var ( - errUnknownTransferTask = errors.New("Unknown transfer task") - errWorkflowBusy = errors.New("Unable to get workflow execution lock within specified timeout") + errUnknownTransferTask = errors.New("unknown transfer task") + errWorkflowBusy = errors.New("unable to get workflow execution lock within specified timeout") errTargetDomainNotActive = errors.New("target domain not active") ) diff --git a/service/history/task/transfer_active_task_executor_test.go b/service/history/task/transfer_active_task_executor_test.go index c32479ec90e..bebc9275c2e 100644 --- a/service/history/task/transfer_active_task_executor_test.go +++ b/service/history/task/transfer_active_task_executor_test.go @@ -47,7 +47,6 @@ import ( "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/mocks" "github.com/uber/cadence/common/persistence" - p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/types" "github.com/uber/cadence/service/history/config" "github.com/uber/cadence/service/history/constants" @@ -480,7 +479,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessCloseExecution_HasParentCro s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeRecordChildExeuctionCompleted, crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, 
false, ) @@ -704,7 +703,7 @@ func (s *transferActiveTaskExecutorSuite) expectCrossClusterApplyParentPolicyCal s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeApplyParentClosePolicy, crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(int64(common.EmptyVersion)).Return(s.mockClusterMetadata.GetCurrentClusterName()).AnyTimes() s.mockDomainCache.EXPECT().GetDomain(s.remoteTargetDomainName).Return(s.remoteTargetDomainEntry, nil).AnyTimes() } @@ -904,8 +903,8 @@ func (s *transferActiveTaskExecutorSuite) TestProcessCancelExecution_Success() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) cancelRequest := createTestRequestCancelWorkflowExecutionRequest(s.targetDomainName, transferTask.GetInfo().(*persistence.TransferTaskInfo), requestCancelInfo.CancelRequestID) s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), cancelRequest).Return(nil).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -925,8 +924,8 @@ func (s *transferActiveTaskExecutorSuite) TestProcessCancelExecution_Failure() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) cancelRequest := createTestRequestCancelWorkflowExecutionRequest(s.targetDomainName, transferTask.GetInfo().(*persistence.TransferTaskInfo), requestCancelInfo.CancelRequestID) s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), cancelRequest).Return(&types.EntityNotExistsError{}).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -970,7 +969,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessCancelExecution_CrossCluste s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeCancelExecution, 
crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -1070,8 +1069,8 @@ func (s *transferActiveTaskExecutorSuite) TestProcessSignalExecution_Success() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) signalRequest := createTestSignalWorkflowExecutionRequest(s.targetDomainName, transferTask.GetInfo().(*persistence.TransferTaskInfo), signalInfo) s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), signalRequest).Return(nil).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() taskInfo := transferTask.GetInfo().(*persistence.TransferTaskInfo) s.mockHistoryClient.EXPECT().RemoveSignalMutableState(gomock.Any(), &types.RemoveSignalMutableStateRequest{ @@ -1101,8 +1100,8 @@ func (s *transferActiveTaskExecutorSuite) TestProcessSignalExecution_Failure() { s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) signalRequest := createTestSignalWorkflowExecutionRequest(s.targetDomainName, transferTask.GetInfo().(*persistence.TransferTaskInfo), signalInfo) s.mockHistoryClient.EXPECT().SignalWorkflowExecution(gomock.Any(), signalRequest).Return(&types.EntityNotExistsError{}).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -1166,7 +1165,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessSignalExecution_CrossCluste s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeSignalExecution, crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -1259,8 +1258,8 @@ func (s 
*transferActiveTaskExecutorSuite) TestProcessStartChildExecution_Success s.mockShard.GetTimeSource().Now(), ) s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), historyReq).Return(&types.StartWorkflowExecutionResponse{RunID: childExecution.RunID}, nil).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() s.mockHistoryClient.EXPECT().ScheduleDecisionTask(gomock.Any(), &types.ScheduleDecisionTaskRequest{ DomainUUID: s.childDomainID, WorkflowExecution: &types.WorkflowExecution{ @@ -1298,8 +1297,8 @@ func (s *transferActiveTaskExecutorSuite) TestProcessStartChildExecution_Failure s.mockShard.GetTimeSource().Now(), ) s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), historyReq).Return(nil, &types.WorkflowExecutionAlreadyStartedError{}).Times(1) - s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&p.AppendHistoryNodesResponse{Size: 0}, nil).Once() - s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + s.mockHistoryV2Mgr.On("AppendHistoryNodes", mock.Anything, mock.Anything).Return(&persistence.AppendHistoryNodesResponse{Size: 0}, nil).Once() + s.mockExecutionMgr.On("UpdateWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -1318,7 +1317,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessStartChildExecution_TargetN s.NoError(err) s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) taskInfo := transferTask.GetInfo().(*persistence.TransferTaskInfo) - event, err = mutableState.GetChildExecutionInitiatedEvent(context.Background(), taskInfo.ScheduleID) + _, err = mutableState.GetChildExecutionInitiatedEvent(context.Background(), taskInfo.ScheduleID) s.NoError(err) s.mockHistoryClient.EXPECT().StartWorkflowExecution(gomock.Any(), gomock.Any()).Return(nil, &types.DomainNotActiveError{}).Times(1) }, @@ -1455,7 +1454,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessStartChildExecution_CrossCl s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeStartChildExecution, crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } @@ -1470,7 +1469,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessStartChildExecution_CrossCl transferTask Task, childInfo *persistence.ChildExecutionInfo, ) 
{ - event = test.AddChildWorkflowExecutionStartedEvent(mutableState, event.GetEventID(), s.childDomainID, childExecution.WorkflowID, childExecution.RunID, childInfo.WorkflowTypeName) + _ = test.AddChildWorkflowExecutionStartedEvent(mutableState, event.GetEventID(), s.childDomainID, childExecution.WorkflowID, childExecution.RunID, childInfo.WorkflowTypeName) di := test.AddDecisionTaskScheduledEvent(mutableState) event = test.AddDecisionTaskStartedEvent(mutableState, di.ScheduleID, mutableState.GetExecutionInfo().TaskList, "some random identity") event = test.AddDecisionTaskCompletedEvent(mutableState, di.ScheduleID, event.GetEventID(), nil, "some random identity") @@ -1486,7 +1485,7 @@ func (s *transferActiveTaskExecutorSuite) TestProcessStartChildExecution_CrossCl s.Len(crossClusterTasks, 1) s.Equal(persistence.CrossClusterTaskTypeStartChildExecution, crossClusterTasks[0].GetType()) return true - })).Return(&p.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &p.MutableStateUpdateSessionStats{}}, nil).Once() + })).Return(&persistence.UpdateWorkflowExecutionResponse{MutableStateUpdateSessionStats: &persistence.MutableStateUpdateSessionStats{}}, nil).Once() }, ) } diff --git a/service/matching/matcher.go b/service/matching/matcher.go index e5ce6e6f771..e8ea3031c32 100644 --- a/service/matching/matcher.go +++ b/service/matching/matcher.go @@ -412,7 +412,7 @@ func (tm *TaskMatcher) ratelimit(ctx context.Context) (*rate.Reservation, error) rsv := tm.limiter.Reserve() // If we have to wait too long for reservation, give up and return - if !rsv.OK() || rsv.Delay() > deadline.Sub(time.Now()) { + if !rsv.OK() || rsv.Delay() > time.Until(deadline) { if rsv.OK() { // if we were indeed given a reservation, return it before we bail out rsv.Cancel() } diff --git a/service/matching/matchingEngine.go b/service/matching/matchingEngine.go index f3b8761075c..12f27608c8f 100644 --- a/service/matching/matchingEngine.go +++ b/service/matching/matchingEngine.go @@ -94,8 +94,8 @@ var ( historyServiceOperationRetryPolicy = common.CreateHistoryServiceRetryPolicy() // ErrNoTasks is exported temporarily for integration test - ErrNoTasks = errors.New("No tasks") - errPumpClosed = errors.New("Task list pump closed its channel") + ErrNoTasks = errors.New("no tasks") + errPumpClosed = errors.New("task list pump closed its channel") pollerIDKey pollerIDCtxKey = "pollerID" identityKey identityCtxKey = "identity" @@ -737,6 +737,9 @@ func (e *matchingEngineImpl) getAllPartitions( } taskList := request.GetTaskList() taskListID, err := newTaskListID(domainID, taskList.GetName(), taskListType) + if err != nil { + return partitionKeys, err + } rootPartition := taskListID.GetRoot() partitionKeys = append(partitionKeys, rootPartition) diff --git a/service/matching/matchingEngine_test.go b/service/matching/matchingEngine_test.go index 35899a07a47..14aadab94a8 100644 --- a/service/matching/matchingEngine_test.go +++ b/service/matching/matchingEngine_test.go @@ -92,8 +92,8 @@ func (s *matchingEngineSuite) TasksHandler(w http.ResponseWriter, r *http.Reques s.Lock() defer s.Unlock() w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprint(w, fmt.Sprintf("%v\n", s.taskManager)) - fmt.Fprint(w, fmt.Sprintf("%v\n", s.matchingEngine)) + fmt.Fprintf(w, "%v\n", s.taskManager) + fmt.Fprintf(w, "%v\n", s.matchingEngine) } func (s *matchingEngineSuite) TearDownSuite() { @@ -521,7 +521,7 @@ func (s *matchingEngineSuite) TestAddThenConsumeActivities() { s.NoError(err) s.NotNil(result) if len(result.TaskToken) == 0 { 
- s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } s.EqualValues(activityID, result.ActivityID) @@ -663,7 +663,7 @@ func (s *matchingEngineSuite) TestSyncMatchActivities() { if len(result.TaskToken) == 0 { // when ratelimit is set to zero, poller is expected to return empty result // reset ratelimit, poll again and make sure task is returned this time - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") s.Equal(float64(0), maxDispatch) maxDispatch = _defaultTaskDispatchRPS wg.Add(1) @@ -858,7 +858,7 @@ func (s *matchingEngineSuite) concurrentPublishConsumeActivities( s.NoError(err) s.NotNil(result) if len(result.TaskToken) == 0 { - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } s.EqualValues(activityID, result.ActivityID) @@ -981,7 +981,7 @@ func (s *matchingEngineSuite) TestConcurrentPublishConsumeDecisions() { } s.NotNil(result) if len(result.TaskToken) == 0 { - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } s.EqualValues(workflowExecution, *result.WorkflowExecution) @@ -1160,7 +1160,7 @@ func (s *matchingEngineSuite) TestMultipleEnginesActivitiesRangeStealing() { } s.NotNil(result) if len(result.TaskToken) == 0 { - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } s.EqualValues(activityID, result.ActivityID) @@ -1297,7 +1297,7 @@ func (s *matchingEngineSuite) TestMultipleEnginesDecisionsRangeStealing() { } s.NotNil(result) if len(result.TaskToken) == 0 { - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } s.EqualValues(workflowExecution, *result.WorkflowExecution) @@ -1470,7 +1470,7 @@ func (s *matchingEngineSuite) TestTaskListManagerGetTaskBatch() { s.NoError(err) s.NotNil(result) if len(result.TaskToken) == 0 { - s.logger.Debug(fmt.Sprintf("empty poll returned")) + s.logger.Debug("empty poll returned") continue } } @@ -1679,7 +1679,6 @@ func (m *testTaskManager) GetName() string { } func (m *testTaskManager) Close() { - return } func (m *testTaskManager) getTaskListManager(id *taskListID) *testTaskListManager { diff --git a/service/matching/taskGC.go b/service/matching/taskGC.go index 4da894afb6f..d725780bc6d 100644 --- a/service/matching/taskGC.go +++ b/service/matching/taskGC.go @@ -86,7 +86,7 @@ func (tgc *taskGC) checkPrecond(ackLevel int64, batchSize int, ignoreTimeCond bo if backlog >= int64(batchSize) { return true } - return backlog > 0 && (ignoreTimeCond || time.Now().Sub(tgc.lastDeleteTime) > maxTimeBetweenTaskDeletes) + return backlog > 0 && (ignoreTimeCond || time.Since(tgc.lastDeleteTime) > maxTimeBetweenTaskDeletes) } func (tgc *taskGC) tryLock() bool { diff --git a/service/matching/taskListManager.go b/service/matching/taskListManager.go index 5240955df0d..f1dbccc4e4f 100644 --- a/service/matching/taskListManager.go +++ b/service/matching/taskListManager.go @@ -561,7 +561,7 @@ func (c *taskListManagerImpl) newChildContext( if !ok { return context.WithTimeout(parent, timeout) } - remaining := deadline.Sub(time.Now()) - tailroom + remaining := time.Until(deadline) - tailroom if remaining < timeout { timeout = time.Duration(common.MaxInt64(0, int64(remaining))) } diff --git a/service/matching/taskListManager_test.go b/service/matching/taskListManager_test.go index 0e54e9d979f..b4b0dddf7c7 100644 --- a/service/matching/taskListManager_test.go +++ 
b/service/matching/taskListManager_test.go
@@ -329,6 +329,6 @@ func TestAddTaskStandby(t *testing.T) {
 	addTaskParam.forwardedFrom = "from child partition"
 	syncMatch, err = tlm.AddTask(context.Background(), addTaskParam)
-	require.Error(t, errRemoteSyncMatchFailed) // should not persist the task
+	require.Error(t, err) // should not persist the task
 	require.False(t, syncMatch)
 }
diff --git a/service/matching/taskReader.go b/service/matching/taskReader.go
index df4b840a3bd..060a8d47f25 100644
--- a/service/matching/taskReader.go
+++ b/service/matching/taskReader.go
@@ -292,7 +292,7 @@ func (tr *taskReader) persistAckLevel() error {
 }
 
 func (tr *taskReader) isTaskAddedRecently(lastAddTime time.Time) bool {
-	return time.Now().Sub(lastAddTime) <= tr.tlMgr.config.MaxTasklistIdleTime()
+	return time.Since(lastAddTime) <= tr.tlMgr.config.MaxTasklistIdleTime()
 }
 
 func (tr *taskReader) logger() log.Logger {
diff --git a/service/worker/archiver/handler_test.go b/service/worker/archiver/handler_test.go
index 2930d0b26cd..473a48ba7fc 100644
--- a/service/worker/archiver/handler_test.go
+++ b/service/worker/archiver/handler_test.go
@@ -217,7 +217,7 @@ func startAndFinishArchiverWorkflow(ctx workflow.Context, concurrency int, numRe
 	requestCh := workflow.NewBufferedChannel(ctx, numRequests)
 	handler := NewHandler(ctx, handlerTestLogger, handlerTestMetrics, concurrency, requestCh)
 	handler.Start()
-	sentHashes := make([]uint64, numRequests, numRequests)
+	sentHashes := make([]uint64, numRequests)
 	workflow.Go(ctx, func(ctx workflow.Context) {
 		for i := 0; i < numRequests; i++ {
 			ar, hash := randomArchiveRequest()
diff --git a/service/worker/archiver/pump_test.go b/service/worker/archiver/pump_test.go
index 7dc6804cdde..9c64be2ed43 100644
--- a/service/worker/archiver/pump_test.go
+++ b/service/worker/archiver/pump_test.go
@@ -234,8 +234,8 @@ func signalAndCarryoverPumpWorkflow(ctx workflow.Context, requestLimit int, carr
 }
 
 func sendRequestsToChannel(ctx workflow.Context, ch workflow.Channel, numRequests int) ([]ArchiveRequest, []uint64) {
-	requests := make([]ArchiveRequest, numRequests, numRequests)
-	hashes := make([]uint64, numRequests, numRequests)
+	requests := make([]ArchiveRequest, numRequests)
+	hashes := make([]uint64, numRequests)
 	workflow.Go(ctx, func(ctx workflow.Context) {
 		for i := 0; i < numRequests; i++ {
 			requests[i], hashes[i] = randomArchiveRequest()
@@ -246,8 +246,8 @@ func sendRequestsToChannel(ctx workflow.Context, ch workflow.Channel, numRequest
 }
 
 func sendRequestsToChannelBlocking(ctx workflow.Context, ch workflow.Channel, numRequests int) ([]ArchiveRequest, []uint64) {
-	requests := make([]ArchiveRequest, numRequests, numRequests)
-	hashes := make([]uint64, numRequests, numRequests)
+	requests := make([]ArchiveRequest, numRequests)
+	hashes := make([]uint64, numRequests)
 	for i := 0; i < numRequests; i++ {
 		requests[i], hashes[i] = randomArchiveRequest()
 		ch.Send(ctx, requests[i])
@@ -265,15 +265,12 @@ func channelContainsExpected(ctx workflow.Context, ch workflow.Channel, expected
 			return false
 		}
 	}
-	if ch.Receive(ctx, nil) {
-		return false
-	}
-	return true
+	return !ch.Receive(ctx, nil)
 }
 
 func randomCarryover(count int) ([]ArchiveRequest, []uint64) {
-	carryover := make([]ArchiveRequest, count, count)
-	hashes := make([]uint64, count, count)
+	carryover := make([]ArchiveRequest, count)
+	hashes := make([]uint64, count)
 	for i := 0; i < count; i++ {
 		carryover[i], hashes[i] = randomArchiveRequest()
 	}
diff --git a/service/worker/scanner/data_corruption_workflow_test.go b/service/worker/scanner/data_corruption_workflow_test.go
index 4d389138996..f677d26ac0e 100644
--- a/service/worker/scanner/data_corruption_workflow_test.go
+++ b/service/worker/scanner/data_corruption_workflow_test.go
@@ -43,6 +43,8 @@ import (
 
 const testWorkflowName = "default-test-workflow-type-name"
 
+var validBranchToken = []byte{89, 11, 0, 10, 0, 0, 0, 12, 116, 101, 115, 116, 45, 116, 114, 101, 101, 45, 105, 100, 11, 0, 20, 0, 0, 0, 14, 116, 101, 115, 116, 45, 98, 114, 97, 110, 99, 104, 45, 105, 100, 0}
+
 type dataCorruptionWorkflowTestSuite struct {
 	suite.Suite
 	testsuite.WorkflowTestSuite
@@ -111,13 +113,13 @@ func (s *dataCorruptionWorkflowTestSuite) TestExecutionFixerActivity_Success() {
 	mockResource.ExecutionMgr.On("GetWorkflowExecution", mock.Anything, mock.Anything).Return(&p.GetWorkflowExecutionResponse{
 		State: &p.WorkflowMutableState{
 			ExecutionInfo: &p.WorkflowExecutionInfo{
-				BranchToken: []byte{},
+				BranchToken: validBranchToken,
 			},
 			VersionHistories: &p.VersionHistories{
 				CurrentVersionHistoryIndex: 0,
 				Histories: []*p.VersionHistory{
 					{
-						BranchToken: []byte{},
+						BranchToken: validBranchToken,
 					},
 				},
 			},
diff --git a/service/worker/scanner/shardscanner/aggregators_test.go b/service/worker/scanner/shardscanner/aggregators_test.go
index c5e8d3a20e9..bb83267d0f6 100644
--- a/service/worker/scanner/shardscanner/aggregators_test.go
+++ b/service/worker/scanner/shardscanner/aggregators_test.go
@@ -546,7 +546,7 @@ func (s *aggregatorsSuite) TestGetShardSizeQueryResult() {
 			expectedResult: nil,
 		},
 		{
-			shardSizes: make([]ShardSizeTuple, 10, 10),
+			shardSizes: make([]ShardSizeTuple, 10),
 			req: ShardSizeQueryRequest{
 				StartIndex: 0,
 				EndIndex:   11,
@@ -555,7 +555,7 @@ func (s *aggregatorsSuite) TestGetShardSizeQueryResult() {
 			expectedResult: nil,
 		},
 		{
-			shardSizes: make([]ShardSizeTuple, 10000, 10000),
+			shardSizes: make([]ShardSizeTuple, 10000),
 			req: ShardSizeQueryRequest{
 				StartIndex: 0,
 				EndIndex:   maxShardQueryResult + 1,
diff --git a/service/worker/scanner/shardscanner/shardscannertest/workflow_test.go b/service/worker/scanner/shardscanner/shardscannertest/workflow_test.go
index a72637e307a..ba5b47e4e9f 100644
--- a/service/worker/scanner/shardscanner/shardscannertest/workflow_test.go
+++ b/service/worker/scanner/shardscanner/shardscannertest/workflow_test.go
@@ -163,7 +163,7 @@ func (s *workflowsSuite) TestScannerWorkflow_Success_Disabled() {
 
 func (s *workflowsSuite) TestFixerWorkflow_Success() {
 	env := s.NewTestWorkflowEnvironment()
-	corruptedKeys := make([]shardscanner.CorruptedKeysEntry, 30, 30)
+	corruptedKeys := make([]shardscanner.CorruptedKeysEntry, 30)
 	for i := 0; i < 30; i++ {
 		corruptedKeys[i] = shardscanner.CorruptedKeysEntry{
 			ShardID: i,
diff --git a/service/worker/scanner/tasklist/handler.go b/service/worker/scanner/tasklist/handler.go
index da5c3af7587..b41747c3c04 100644
--- a/service/worker/scanner/tasklist/handler.go
+++ b/service/worker/scanner/tasklist/handler.go
@@ -102,7 +102,7 @@ func (s *Scavenger) tryDeleteTaskList(info *p.TaskListInfo) {
 	if strings.HasPrefix(info.Name, scannerTaskListPrefix) {
 		return // avoid deleting our own task list
 	}
-	delta := time.Now().Sub(info.LastUpdated)
+	delta := time.Since(info.LastUpdated)
 	if delta < taskListGracePeriod {
 		return
 	}
diff --git a/service/worker/shadower/workflow.go b/service/worker/shadower/workflow.go
index c9139e253ed..1e2bf87cfb0 100644
--- a/service/worker/shadower/workflow.go
+++ b/service/worker/shadower/workflow.go
@@ -194,7 +194,7 @@ func validateAndFillWorkflowParams(
 	config *workflowConfig,
 ) error {
 	if len(params.GetDomain()) == 0 {
-		return errors.New("Domain is not set on shadower workflow params")
+		return errors.New("domain is not set on shadower workflow params")
 	}
 
 	if len(params.GetTaskList()) == 0 {
diff --git a/service/worker/shadower/workflow_test.go b/service/worker/shadower/workflow_test.go
index 0d84d9909e0..3c0d5aca745 100644
--- a/service/worker/shadower/workflow_test.go
+++ b/service/worker/shadower/workflow_test.go
@@ -352,6 +352,7 @@ func (s *workflowSuite) TestShadowWorkflow_ContinueAsNew_MaxShadowCount() {
 	s.True(ok)
 	s.Equal(shadower.WorkflowName, continueAsNewErr.WorkflowType().Name)
 	shadowParams, ok := continueAsNewErr.Args()[0].(shadower.WorkflowParams)
+	s.True(ok)
 	s.Equal(testActiveDomainName, shadowParams.GetDomain())
 	s.Equal(testTaskListName, shadowParams.GetTaskList())
 	s.Equal(testWorkflowQuery, shadowParams.GetWorkflowQuery())
@@ -408,6 +409,7 @@ func (s *workflowSuite) TestShadowWorkflow_ContinueAsNew_ContinuousShadowing() {
 	s.True(ok)
 	s.Equal(shadower.WorkflowName, continueAsNewErr.WorkflowType().Name)
 	shadowParams, ok := continueAsNewErr.Args()[0].(shadower.WorkflowParams)
+	s.True(ok)
 	s.Equal(testActiveDomainName, shadowParams.GetDomain())
 	s.Equal(testTaskListName, shadowParams.GetTaskList())
 	s.Equal(testWorkflowQuery, shadowParams.GetWorkflowQuery())
diff --git a/tools/cassandra/cqlclient.go b/tools/cassandra/cqlclient.go
index 57cb7cd354f..7ac5e0d1a16 100644
--- a/tools/cassandra/cqlclient.go
+++ b/tools/cassandra/cqlclient.go
@@ -53,8 +53,7 @@ type (
 	}
 )
 
-var errNoHosts = errors.New("Cassandra Hosts list is empty or malformed")
-var errGetSchemaVersion = errors.New("Failed to get current schema version from cassandra")
+var errGetSchemaVersion = errors.New("failed to get current schema version from cassandra")
 
 const (
 	DefaultTimeout = 30 // Timeout in seconds
diff --git a/tools/cassandra/handler.go b/tools/cassandra/handler.go
index 36fb5e4f424..ad67c9e540a 100644
--- a/tools/cassandra/handler.go
+++ b/tools/cassandra/handler.go
@@ -222,7 +222,3 @@ func handleErr(err error) error {
 	log.Println(err)
 	return err
 }
-
-func logErr(err error) {
-	log.Println(err)
-}
diff --git a/tools/cli/adminConfigStoreCommands.go b/tools/cli/adminConfigStoreCommands.go
index 974197ef56c..79c8d8c114e 100644
--- a/tools/cli/adminConfigStoreCommands.go
+++ b/tools/cli/adminConfigStoreCommands.go
@@ -56,7 +56,7 @@ func AdminGetDynamicConfig(c *cli.Context) {
 	ctx, cancel := newContext(c)
 	defer cancel()
 
-	if filters == nil || len(filters) == 0 {
+	if len(filters) == 0 {
 		req := &types.ListDynamicConfigRequest{
 			ConfigName: dcName,
 		}
diff --git a/tools/cli/adminDBCleanCommand.go b/tools/cli/adminDBCleanCommand.go
index eda9ffc4e3e..aa8cdcebba7 100644
--- a/tools/cli/adminDBCleanCommand.go
+++ b/tools/cli/adminDBCleanCommand.go
@@ -65,7 +65,7 @@ func AdminDBDataDecodeThrift(c *cli.Context) {
 		if err != nil {
 			ErrorAndExit("cannot encode back to confirm", err)
 		}
-		if bytes.Compare(data, data2) == 0 {
+		if bytes.Equal(data, data2) {
 			fmt.Printf("=======Decode into type %v ========\n", typeName)
 			fmt.Println(anyToString(t, true, 0))
 			found = true
diff --git a/tools/cli/adminDBScanCommand.go b/tools/cli/adminDBScanCommand.go
index c0ea9a12e96..5f6c5f1fe5f 100644
--- a/tools/cli/adminDBScanCommand.go
+++ b/tools/cli/adminDBScanCommand.go
@@ -165,7 +165,7 @@ func AdminDBScanUnsupportedWorkflow(c *cli.Context) {
 	defer outputFile.Close()
 	for i := startShardID; i <= endShardID; i++ {
 		listExecutionsByShardID(c, i, rps, outputFile)
-		fmt.Println(fmt.Sprintf("Shard %v scan operation is completed.", i))
+		fmt.Printf("Shard %v scan operation is completed.\n", i)
 	}
 }
diff --git a/tools/cli/adminDLQCommands.go b/tools/cli/adminDLQCommands.go
index bb173f58d9a..0afa2b569e7 100644
--- a/tools/cli/adminDLQCommands.go
+++ b/tools/cli/adminDLQCommands.go
@@ -107,7 +107,7 @@ func AdminGetDLQMessages(c *cli.Context) {
 	}
 
 	if showRawTask {
-		_, err := outputFile.WriteString(fmt.Sprintf("#### REPLICATION DLQ RAW TASKS INFO ####\n"))
+		_, err := outputFile.WriteString("#### REPLICATION DLQ RAW TASKS INFO ####\n")
 		if err != nil {
 			ErrorAndExit("fail to print dlq raw tasks.", err)
 		}
diff --git a/tools/cli/adminElasticSearchCommands.go b/tools/cli/adminElasticSearchCommands.go
index 7a56eb1cf24..328da489b64 100644
--- a/tools/cli/adminElasticSearchCommands.go
+++ b/tools/cli/adminElasticSearchCommands.go
@@ -274,12 +274,8 @@ func generateESDoc(msg *indexer.Message) map[string]interface{} {
 
 // This function is used to trim unnecessary tag in returned json for table header
 func trimBucketKey(k string) string {
 	// group key is in form of "group_key", we only need "key" as the column name
-	if strings.HasPrefix(k, "group_") {
-		k = k[6:]
-	}
-	if strings.HasPrefix(k, "Attr_") {
-		k = k[5:]
-	}
+	k = strings.TrimPrefix(k, "group_")
+	k = strings.TrimPrefix(k, "Attr_")
 	return fmt.Sprintf(`%v(*)`, k)
 }
diff --git a/tools/cli/adminKafkaCommands.go b/tools/cli/adminKafkaCommands.go
index 82d49688bed..50991c650e2 100644
--- a/tools/cli/adminKafkaCommands.go
+++ b/tools/cli/adminKafkaCommands.go
@@ -34,7 +34,7 @@ import (
 	"time"
 
 	"github.com/urfave/cli"
-	"go.uber.org/thriftrw/protocol"
+	"go.uber.org/thriftrw/protocol/binary"
 	"go.uber.org/thriftrw/wire"
 
 	"github.com/uber/cadence/.gen/go/indexer"
@@ -200,16 +200,15 @@ func startParser(readerCh <-chan []byte, writerCh *writerChannel, skipErrors boo
 	var buffer []byte
 Loop:
 	for {
-		select {
-		case data, ok := <-readerCh:
-			if !ok {
-				break Loop
-			}
-			buffer = append(buffer, data...)
-			data, nextBuffer := splitBuffer(buffer)
-			buffer = nextBuffer
-			parse(data, skipErrors, skippedCount, writerCh)
+		data, ok := <-readerCh
+
+		if !ok {
+			break Loop
 		}
+		buffer = append(buffer, data...)
+		data, nextBuffer := splitBuffer(buffer)
+		buffer = nextBuffer
+		parse(data, skipErrors, skippedCount, writerCh)
 	}
 	parse(buffer, skipErrors, skippedCount, writerCh)
 }
@@ -248,38 +247,37 @@ func writeReplicationTask(
 	filter := buildFilterFn(c.String(FlagWorkflowID), c.String(FlagRunID))
 Loop:
 	for {
-		select {
-		case task, ok := <-writerCh.ReplicationTaskChannel:
-			if !ok {
-				break Loop
-			}
-			if filter(task) {
-				jsonStr, err := decodeReplicationTask(task, serializer)
-				if err != nil {
-					if !skipErrMode {
-						ErrorAndExit(malformedMessage, fmt.Errorf("failed to encode into json, err: %v", err))
-					} else {
-						atomic.AddInt32(skippedCount, 1)
-						continue Loop
-					}
-				}
+		task, ok := <-writerCh.ReplicationTaskChannel
 
-				var outStr string
-				if !headerMode {
-					outStr = string(jsonStr)
+		if !ok {
+			break Loop
+		}
+		if filter(task) {
+			jsonStr, err := decodeReplicationTask(task, serializer)
+			if err != nil {
+				if !skipErrMode {
+					ErrorAndExit(malformedMessage, fmt.Errorf("failed to encode into json, err: %v", err))
 				} else {
-					outStr = fmt.Sprintf(
-						"%v, %v, %v",
-						task.GetHistoryTaskV2Attributes().DomainID,
-						task.GetHistoryTaskV2Attributes().WorkflowID,
-						task.GetHistoryTaskV2Attributes().RunID,
-					)
-				}
-				_, err = outputFile.WriteString(fmt.Sprintf("%v\n", outStr))
-				if err != nil {
-					ErrorAndExit("Failed to write to file", fmt.Errorf("err: %v", err))
+					atomic.AddInt32(skippedCount, 1)
+					continue Loop
 				}
 			}
+
+			var outStr string
+			if !headerMode {
+				outStr = string(jsonStr)
+			} else {
+				outStr = fmt.Sprintf(
+					"%v, %v, %v",
+					task.GetHistoryTaskV2Attributes().DomainID,
+					task.GetHistoryTaskV2Attributes().WorkflowID,
+					task.GetHistoryTaskV2Attributes().RunID,
+				)
+			}
+			_, err = outputFile.WriteString(fmt.Sprintf("%v\n", outStr))
+			if err != nil {
+				ErrorAndExit("Failed to write to file", fmt.Errorf("err: %v", err))
+			}
 		}
 	}
 }
@@ -295,40 +293,39 @@ func writeVisibilityMessage(
 	filter := buildFilterFnForVisibility(c.String(FlagWorkflowID), c.String(FlagRunID))
 Loop:
 	for {
-		select {
-		case msg, ok := <-writerCh.VisibilityMsgChannel:
-			if !ok {
-				break Loop
-			}
-			if filter(msg) {
-				jsonStr, err := json.Marshal(msg)
-				if err != nil {
-					if !skipErrMode {
-						ErrorAndExit(malformedMessage, fmt.Errorf("failed to encode into json, err: %v", err))
-					} else {
-						atomic.AddInt32(skippedCount, 1)
-						continue Loop
-					}
-				}
+		msg, ok := <-writerCh.VisibilityMsgChannel
 
-				var outStr string
-				if !headerMode {
-					outStr = string(jsonStr)
+		if !ok {
+			break Loop
+		}
+		if filter(msg) {
+			jsonStr, err := json.Marshal(msg)
+			if err != nil {
+				if !skipErrMode {
+					ErrorAndExit(malformedMessage, fmt.Errorf("failed to encode into json, err: %v", err))
 				} else {
-					outStr = fmt.Sprintf(
-						"%v, %v, %v, %v, %v",
-						msg.GetDomainID(),
-						msg.GetWorkflowID(),
-						msg.GetRunID(),
-						msg.GetMessageType().String(),
-						msg.GetVersion(),
-					)
-				}
-				_, err = outputFile.WriteString(fmt.Sprintf("%v\n", outStr))
-				if err != nil {
-					ErrorAndExit("Failed to write to file", fmt.Errorf("err: %v", err))
+					atomic.AddInt32(skippedCount, 1)
+					continue Loop
 				}
 			}
+
+			var outStr string
+			if !headerMode {
+				outStr = string(jsonStr)
+			} else {
+				outStr = fmt.Sprintf(
+					"%v, %v, %v, %v, %v",
+					msg.GetDomainID(),
+					msg.GetWorkflowID(),
+					msg.GetRunID(),
+					msg.GetMessageType().String(),
+					msg.GetVersion(),
+				)
+			}
+			_, err = outputFile.WriteString(fmt.Sprintf("%v\n", outStr))
+			if err != nil {
+				ErrorAndExit("Failed to write to file", fmt.Errorf("err: %v", err))
+			}
 		}
 	}
 }
@@ -409,7 +406,7 @@ func deserializeMessages(messages [][]byte, skipErrors bool) ([]*types.Replicati
 
 func decode(message []byte, val *replicator.ReplicationTask) error {
 	reader := bytes.NewReader(message[1:])
-	wireVal, err := protocol.Binary.Decode(reader, wire.TStruct)
+	wireVal, err := binary.Default.Decode(reader, wire.TStruct)
 	if err != nil {
 		return err
 	}
@@ -437,7 +434,7 @@ func deserializeVisibilityMessages(messages [][]byte, skipErrors bool) ([]*index
 
 func decodeVisibility(message []byte, val *indexer.Message) error {
 	reader := bytes.NewReader(message[1:])
-	wireVal, err := protocol.Binary.Decode(reader, wire.TStruct)
+	wireVal, err := binary.Default.Decode(reader, wire.TStruct)
 	if err != nil {
 		return err
 	}
diff --git a/tools/cli/adminTimers.go b/tools/cli/adminTimers.go
index fedad4e54b8..67a23939dc3 100644
--- a/tools/cli/adminTimers.go
+++ b/tools/cli/adminTimers.go
@@ -53,7 +53,6 @@ type Reporter struct {
 	timerTypes []int
 	loader     LoadCloser
 	printer    Printer
-	timeFormat string
 }
 
 type dbLoadCloser struct {
diff --git a/tools/cli/util.go b/tools/cli/util.go
index b3b75d9b9c5..4ac7450fe98 100644
--- a/tools/cli/util.go
+++ b/tools/cli/util.go
@@ -714,7 +714,7 @@ func parseTime(timeStr string, defaultValue int64) int64 {
 func parseTimeRange(timeRange string) (time.Time, error) {
 	match, err := regexp.MatchString(defaultDateTimeRangeShortRE, timeRange)
 	if !match { // fallback on to check if it's of longer notation
-		match, err = regexp.MatchString(defaultDateTimeRangeLongRE, timeRange)
+		_, err = regexp.MatchString(defaultDateTimeRangeLongRE, timeRange)
 	}
 	if err != nil {
 		return time.Time{}, err
diff --git a/tools/common/schema/version.go b/tools/common/schema/version.go
index ae9018e769d..46616041fc0 100644
--- a/tools/common/schema/version.go
+++ b/tools/common/schema/version.go
@@ -22,7 +22,6 @@ package schema
 
 import (
 	"fmt"
-	"io/ioutil"
 	"regexp"
 	"strconv"
 	"strings"
@@ -97,30 +96,3 @@ func parseValidateVersion(ver string) (string, error) {
 	}
 	return ver, nil
 }
-
-// getExpectedVersion gets the latest version from the schema directory
-func getExpectedVersion(dir string) (string, error) {
-	subdirs, err := ioutil.ReadDir(dir)
-	if err != nil {
-		return "", err
-	}
-
-	var result string
-	for _, subdir := range subdirs {
-		if !subdir.IsDir() {
-			continue
-		}
-		dirname := subdir.Name()
-		if !versionStrRegex.MatchString(dirname) {
-			continue
-		}
-		ver := dirToVersion(dirname)
-		if len(result) == 0 || cmpVersion(ver, result) > 0 {
-			result = ver
-		}
-	}
-	if len(result) == 0 {
-		return "", fmt.Errorf("no valid schemas found in dir: %s", dir)
-	}
-	return result, nil
-}
diff --git a/tools/common/schema/version_test.go b/tools/common/schema/version_test.go
index 398f18f2b26..ea2caf7bc28 100644
--- a/tools/common/schema/version_test.go
+++ b/tools/common/schema/version_test.go
@@ -21,8 +21,6 @@ package schema
 
 import (
-	"io/ioutil"
-	"os"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -121,43 +119,3 @@ func (s *VersionTestSuite) execParseTest(input string, expMajor int, expMinor in
 	s.Equal(expMajor, maj)
 	s.Equal(expMinor, min)
 }
-
-func (s *VersionTestSuite) TestGetExpectedVersion() {
-	s.T().Skip()
-	flags := []struct {
-		dirs     []string
-		expected string
-		err      string
-	}{
-		{[]string{"1.0"}, "1.0", ""},
-		{[]string{"1.0", "2.0"}, "2.0", ""},
-		{[]string{"abc"}, "", "no valid schemas"},
-	}
-	for _, flag := range flags {
-		s.expectedVersionTest(flag.expected, flag.dirs, flag.err)
-	}
-}
-
-func (s *VersionTestSuite) expectedVersionTest(expected string, dirs []string, errStr string) {
-	tmpDir, err := ioutil.TempDir("", "version_test")
-	s.NoError(err)
-	defer os.RemoveAll(tmpDir)
-
-	for _, dir := range dirs {
-		s.createSchemaForVersion(tmpDir, dir)
-	}
-	v, err := getExpectedVersion(tmpDir)
-	if len(errStr) == 0 {
-		s.Equal(expected, v)
-	} else {
-		s.Error(err)
-		s.Contains(err.Error(), errStr)
-	}
-}
-
-func (s *VersionTestSuite) createSchemaForVersion(subdir string, v string) {
-	vDir := subdir + "/v" + v
-	s.NoError(os.Mkdir(vDir, os.FileMode(0744)))
-	cqlFile := vDir + "/tmp.cql"
-	s.NoError(ioutil.WriteFile(cqlFile, []byte{}, os.FileMode(0644)))
-}
diff --git a/tools/sql/handler.go b/tools/sql/handler.go
index f4e645d3d48..86e79f1b090 100644
--- a/tools/sql/handler.go
+++ b/tools/sql/handler.go
@@ -250,7 +250,3 @@ func handleErr(err error) error {
 	log.Println(err)
 	return err
 }
-
-func logErr(err error) {
-	log.Println(err)
-}