From 1736590777e0415d94e45014a9dea4b2e615bdc1 Mon Sep 17 00:00:00 2001
From: Yu Xia
Date: Tue, 24 Sep 2019 14:46:13 -0700
Subject: [PATCH] Implement NDC events reapplication (#2577)

* Wire up remaining events reapply logic.

* Add integration test
---
 .gen/go/history/history.go | 206 ++++++++++++++++-
 .../go/history/historyserviceclient/client.go | 6 +-
 .gen/go/history/historyservicefx/client.go | 2 +-
 .gen/go/history/historyservicefx/doc.go | 2 +-
 .gen/go/history/historyservicefx/server.go | 2 +-
 .../go/history/historyserviceserver/server.go | 6 +-
 .gen/go/history/historyservicetest/client.go | 4 +-
 .gen/go/replicator/replicator.go | 2 +-
 .gen/go/shared/shared.go | 10 +-
 LICENSE | 2 +-
 client/clientBean.go | 42 ++++
 client/clientBean_mock.go | 32 +++
 client/history/client.go | 4 +-
 client/history/metricClient.go | 2 +-
 client/history/retryableClient.go | 2 +-
 common/metrics/defs.go | 5 +
 common/service/service.go | 7 +
 common/service/serviceTestBase.go | 10 +
 common/service/serviceinterfaces.go | 4 +
 host/ndc/nDC_integration_test.go | 213 ++++++++++++++++--
 host/onebox.go | 32 +++
 host/testcluster.go | 3 +
 .../ndc_integration_test_clusters.yaml | 4 +-
 idl/github.com/uber/cadence/history.thrift | 7 +-
 service/frontend/dcRedirectionHandler_test.go | 2 +-
 service/frontend/workflowHandler.go | 21 +-
 service/frontend/workflowHandler_test.go | 45 ++--
 service/history/MockHistoryEngine.go | 14 ++
 .../history/MockWorkflowExecutionContext.go | 13 ++
 service/history/conflictResolver_test.go | 9 +-
 service/history/handler.go | 37 ++-
 service/history/historyCache_test.go | 9 +-
 service/history/historyEngine.go | 60 ++++-
 service/history/historyEngine2_test.go | 10 +-
 .../history/historyEngine3_eventsv2_test.go | 10 +-
 service/history/historyEngineInterfaces.go | 1 +
 service/history/historyEngine_test.go | 9 +-
 service/history/historyReplicator_test.go | 9 +-
 service/history/historyTestBase.go | 13 +-
 service/history/nDCBranchMgr_test.go | 10 +-
 service/history/nDCConflictResolver_test.go | 10 +-
 service/history/nDCEventsReapplier.go | 7 +-
 service/history/nDCStateRebuilder_test.go | 9 +-
 service/history/nDCTransactionMgr.go | 2 +
 ...CTransactionMgrForExistingWorkflow_mock.go | 8 +-
 ...CTransactionMgrForExistingWorkflow_test.go | 2 +-
 .../nDCTransactionMgrForNewWorkflow.go | 9 +
 .../nDCTransactionMgrForNewWorkflow_mock.go | 10 +-
 .../nDCTransactionMgrForNewWorkflow_test.go | 13 +-
 service/history/nDCTransactionMgr_mock.go | 8 +
 service/history/nDCTransactionMgr_test.go | 33 ++-
 service/history/nDCWorkflowResetter_test.go | 2 +-
 service/history/nDCWorkflow_test.go | 2 +-
 service/history/queueAckMgr_test.go | 4 +-
 .../history/replicatorQueueProcessor_test.go | 2 +-
 service/history/shardController_test.go | 2 +-
 service/history/stateBuilder_test.go | 2 +-
 service/history/taskProcessor_test.go | 2 +-
 service/history/timerQueueAckMgr_test.go | 4 +-
 service/history/timerQueueProcessor2_test.go | 2 +-
 .../history/timerQueueProcessorBase_test.go | 2 +-
 .../timerQueueStandbyProcessor_test.go | 2 +-
 .../transferQueueActiveProcessor_test.go | 1 +
 .../transferQueueStandbyProcessor_test.go | 1 +
 service/history/workflowExecutionContext.go | 137 ++++++++++-
 service/history/workflowResetor_test.go | 2 +-
 66 files changed, 1024 insertions(+), 135 deletions(-)

diff --git a/.gen/go/history/history.go b/.gen/go/history/history.go
index b4c267db25b..0be1ee1cf88 100644
--- a/.gen/go/history/history.go
+++ b/.gen/go/history/history.go
@@ -1,6 +1,6 @@
 // The MIT License (MIT)
 //
-// Copyright (c) 2017 Uber Technologies, Inc.
+// Copyright (c) 2019 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -3705,6 +3705,192 @@ func (v *QueryWorkflowResponse) IsSetQueryResult() bool {
 	return v != nil && v.QueryResult != nil
 }
 
+type ReapplyEventsRequest struct {
+	DomainUUID *string                      `json:"domainUUID,omitempty"`
+	Request    *shared.ReapplyEventsRequest `json:"request,omitempty"`
+}
+
+// ToWire translates a ReapplyEventsRequest struct into a Thrift-level intermediate
+// representation. This intermediate representation may be serialized
+// into bytes using a ThriftRW protocol implementation.
+//
+// An error is returned if the struct or any of its fields failed to
+// validate.
+//
+//   x, err := v.ToWire()
+//   if err != nil {
+//     return err
+//   }
+//
+//   if err := binaryProtocol.Encode(x, writer); err != nil {
+//     return err
+//   }
+func (v *ReapplyEventsRequest) ToWire() (wire.Value, error) {
+	var (
+		fields [2]wire.Field
+		i      int = 0
+		w      wire.Value
+		err    error
+	)
+
+	if v.DomainUUID != nil {
+		w, err = wire.NewValueString(*(v.DomainUUID)), error(nil)
+		if err != nil {
+			return w, err
+		}
+		fields[i] = wire.Field{ID: 10, Value: w}
+		i++
+	}
+	if v.Request != nil {
+		w, err = v.Request.ToWire()
+		if err != nil {
+			return w, err
+		}
+		fields[i] = wire.Field{ID: 20, Value: w}
+		i++
+	}
+
+	return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil
+}
+
+func _ReapplyEventsRequest_Read(w wire.Value) (*shared.ReapplyEventsRequest, error) {
+	var v shared.ReapplyEventsRequest
+	err := v.FromWire(w)
+	return &v, err
+}
+
+// FromWire deserializes a ReapplyEventsRequest struct from its Thrift-level
+// representation. The Thrift-level representation may be obtained
+// from a ThriftRW protocol implementation.
+//
+// An error is returned if we were unable to build a ReapplyEventsRequest struct
+// from the provided intermediate representation.
+//
+//   x, err := binaryProtocol.Decode(reader, wire.TStruct)
+//   if err != nil {
+//     return nil, err
+//   }
+//
+//   var v ReapplyEventsRequest
+//   if err := v.FromWire(x); err != nil {
+//     return nil, err
+//   }
+//   return &v, nil
+func (v *ReapplyEventsRequest) FromWire(w wire.Value) error {
+	var err error
+
+	for _, field := range w.GetStruct().Fields {
+		switch field.ID {
+		case 10:
+			if field.Value.Type() == wire.TBinary {
+				var x string
+				x, err = field.Value.GetString(), error(nil)
+				v.DomainUUID = &x
+				if err != nil {
+					return err
+				}
+
+			}
+		case 20:
+			if field.Value.Type() == wire.TStruct {
+				v.Request, err = _ReapplyEventsRequest_Read(field.Value)
+				if err != nil {
+					return err
+				}
+
+			}
+		}
+	}
+
+	return nil
+}
+
+// String returns a readable string representation of a ReapplyEventsRequest
+// struct.
+func (v *ReapplyEventsRequest) String() string {
+	if v == nil {
+		return ""
+	}
+
+	var fields [2]string
+	i := 0
+	if v.DomainUUID != nil {
+		fields[i] = fmt.Sprintf("DomainUUID: %v", *(v.DomainUUID))
+		i++
+	}
+	if v.Request != nil {
+		fields[i] = fmt.Sprintf("Request: %v", v.Request)
+		i++
+	}
+
+	return fmt.Sprintf("ReapplyEventsRequest{%v}", strings.Join(fields[:i], ", "))
+}
+
+// Equals returns true if all the fields of this ReapplyEventsRequest match the
+// provided ReapplyEventsRequest.
+//
+// This function performs a deep comparison.
+func (v *ReapplyEventsRequest) Equals(rhs *ReapplyEventsRequest) bool {
+	if v == nil {
+		return rhs == nil
+	} else if rhs == nil {
+		return false
+	}
+	if !_String_EqualsPtr(v.DomainUUID, rhs.DomainUUID) {
+		return false
+	}
+	if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) {
+		return false
+	}
+
+	return true
+}
+
+// MarshalLogObject implements zapcore.ObjectMarshaler, enabling
+// fast logging of ReapplyEventsRequest.
+func (v *ReapplyEventsRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) {
+	if v == nil {
+		return nil
+	}
+	if v.DomainUUID != nil {
+		enc.AddString("domainUUID", *v.DomainUUID)
+	}
+	if v.Request != nil {
+		err = multierr.Append(err, enc.AddObject("request", v.Request))
+	}
+	return err
+}
+
+// GetDomainUUID returns the value of DomainUUID if it is set or its
+// zero value if it is unset.
+func (v *ReapplyEventsRequest) GetDomainUUID() (o string) {
+	if v != nil && v.DomainUUID != nil {
+		return *v.DomainUUID
+	}
+
+	return
+}
+
+// IsSetDomainUUID returns true if DomainUUID is not nil.
+func (v *ReapplyEventsRequest) IsSetDomainUUID() bool {
+	return v != nil && v.DomainUUID != nil
+}
+
+// GetRequest returns the value of Request if it is set or its
+// zero value if it is unset.
+func (v *ReapplyEventsRequest) GetRequest() (o *shared.ReapplyEventsRequest) {
+	if v != nil && v.Request != nil {
+		return v.Request
+	}
+
+	return
+}
+
+// IsSetRequest returns true if Request is not nil.
+func (v *ReapplyEventsRequest) IsSetRequest() bool {
+	return v != nil && v.Request != nil
+}
+
 type RecordActivityTaskHeartbeatRequest struct {
 	DomainUUID       *string                                    `json:"domainUUID,omitempty"`
 	HeartbeatRequest *shared.RecordActivityTaskHeartbeatRequest `json:"heartbeatRequest,omitempty"`
@@ -12140,7 +12326,7 @@ var ThriftModule = &thriftreflect.ThriftModule{
 	Name:     "history",
 	Package:  "github.com/uber/cadence/.gen/go/history",
 	FilePath: "history.thrift",
-	SHA1:     "ce5eb8d365d70e410d0c32a8a6bf86d2b924b122",
+	SHA1:     "ab284313a8c4213d10b21312e684a435c0ea2fe4",
 	Includes: []*thriftreflect.ThriftModule{
 		replicator.ThriftModule,
 		shared.ThriftModule,
@@ -12148,7 +12334,7 @@ var ThriftModule = &thriftreflect.ThriftModule{
 	Raw: rawIDL,
 }
 
-const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. 
workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. 
Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n 70: optional bool resetWorkflow\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional shared.WorkflowQuery query\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. 
It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. 
If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. 
This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: shared.ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 
)\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. 
workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. 
Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n 70: optional bool resetWorkflow\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional shared.WorkflowQuery query\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n}\n\nstruct ReapplyEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.ReapplyEventsRequest request\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. 
It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. 
If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. 
This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n )\n}\n" 
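The ReapplyEventsRequest defined at the end of the IDL above is a thin history-level wrapper: it carries the resolved domainUUID next to the public shared.ReapplyEventsRequest, and the ReapplyEvents RPC on HistoryService now takes this wrapper instead of the shared type directly (the regenerated HistoryService_ReapplyEvents_Args below reflects that change). As a minimal sketch of how a caller uses the wrapper, assuming the domain UUID has already been resolved and a history client is at hand; the helper and package names here are hypothetical and not part of this patch:

package reapplyexample // hypothetical package, for illustration only

import (
	"context"

	h "github.com/uber/cadence/.gen/go/history"
	"github.com/uber/cadence/.gen/go/shared"
	hc "github.com/uber/cadence/client/history"
	"github.com/uber/cadence/common"
)

// forwardReapply is a hypothetical helper (not part of this patch) showing how a
// caller that has already resolved the domain UUID wraps the public request into
// the new history-level ReapplyEventsRequest, the same shape the frontend
// WorkflowHandler builds before calling the history service.
func forwardReapply(
	ctx context.Context,
	historyClient hc.Client,
	domainUUID string,
	request *shared.ReapplyEventsRequest,
) error {
	// Routing is unchanged: the history client picks the owning host from the
	// workflow ID carried inside the wrapped shared request.
	return historyClient.ReapplyEvents(ctx, &h.ReapplyEventsRequest{
		DomainUUID: common.StringPtr(domainUUID),
		Request:    request,
	})
}

This mirrors the wrapping added to the frontend WorkflowHandler later in this patch, and the history client change that reads the workflow ID via GetRequest().GetWorkflowExecution().GetWorkflowId() when selecting a shard.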
// HistoryService_CloseShard_Args represents the arguments for the HistoryService.CloseShard function. // @@ -17970,7 +18156,7 @@ func (v *HistoryService_QueryWorkflow_Result) EnvelopeType() wire.EnvelopeType { // // The arguments for ReapplyEvents are sent and received over the wire as this struct. type HistoryService_ReapplyEvents_Args struct { - ReapplyEventsRequest *shared.ReapplyEventsRequest `json:"reapplyEventsRequest,omitempty"` + ReapplyEventsRequest *ReapplyEventsRequest `json:"reapplyEventsRequest,omitempty"` } // ToWire translates a HistoryService_ReapplyEvents_Args struct into a Thrift-level intermediate @@ -18008,8 +18194,8 @@ func (v *HistoryService_ReapplyEvents_Args) ToWire() (wire.Value, error) { return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _ReapplyEventsRequest_Read(w wire.Value) (*shared.ReapplyEventsRequest, error) { - var v shared.ReapplyEventsRequest +func _ReapplyEventsRequest_1_Read(w wire.Value) (*ReapplyEventsRequest, error) { + var v ReapplyEventsRequest err := v.FromWire(w) return &v, err } @@ -18038,7 +18224,7 @@ func (v *HistoryService_ReapplyEvents_Args) FromWire(w wire.Value) error { switch field.ID { case 1: if field.Value.Type() == wire.TStruct { - v.ReapplyEventsRequest, err = _ReapplyEventsRequest_Read(field.Value) + v.ReapplyEventsRequest, err = _ReapplyEventsRequest_1_Read(field.Value) if err != nil { return err } @@ -18098,7 +18284,7 @@ func (v *HistoryService_ReapplyEvents_Args) MarshalLogObject(enc zapcore.ObjectE // GetReapplyEventsRequest returns the value of ReapplyEventsRequest if it is set or its // zero value if it is unset. -func (v *HistoryService_ReapplyEvents_Args) GetReapplyEventsRequest() (o *shared.ReapplyEventsRequest) { +func (v *HistoryService_ReapplyEvents_Args) GetReapplyEventsRequest() (o *ReapplyEventsRequest) { if v != nil && v.ReapplyEventsRequest != nil { return v.ReapplyEventsRequest } @@ -18133,7 +18319,7 @@ var HistoryService_ReapplyEvents_Helper = struct { // Args accepts the parameters of ReapplyEvents in-order and returns // the arguments struct for the function. Args func( - reapplyEventsRequest *shared.ReapplyEventsRequest, + reapplyEventsRequest *ReapplyEventsRequest, ) *HistoryService_ReapplyEvents_Args // IsException returns true if the given error can be thrown @@ -18174,7 +18360,7 @@ var HistoryService_ReapplyEvents_Helper = struct { func init() { HistoryService_ReapplyEvents_Helper.Args = func( - reapplyEventsRequest *shared.ReapplyEventsRequest, + reapplyEventsRequest *ReapplyEventsRequest, ) *HistoryService_ReapplyEvents_Args { return &HistoryService_ReapplyEvents_Args{ ReapplyEventsRequest: reapplyEventsRequest, diff --git a/.gen/go/history/historyserviceclient/client.go b/.gen/go/history/historyserviceclient/client.go index bfb519c825d..fd2973c9964 100644 --- a/.gen/go/history/historyserviceclient/client.go +++ b/.gen/go/history/historyserviceclient/client.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -89,7 +89,7 @@ type Interface interface { ReapplyEvents( ctx context.Context, - ReapplyEventsRequest *shared.ReapplyEventsRequest, + ReapplyEventsRequest *history.ReapplyEventsRequest, opts ...yarpc.CallOption, ) error @@ -448,7 +448,7 @@ func (c client) QueryWorkflow( func (c client) ReapplyEvents( ctx context.Context, - _ReapplyEventsRequest *shared.ReapplyEventsRequest, + _ReapplyEventsRequest *history.ReapplyEventsRequest, opts ...yarpc.CallOption, ) (err error) { diff --git a/.gen/go/history/historyservicefx/client.go b/.gen/go/history/historyservicefx/client.go index 6b08d6aa748..4e8992d86ae 100644 --- a/.gen/go/history/historyservicefx/client.go +++ b/.gen/go/history/historyservicefx/client.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/.gen/go/history/historyservicefx/doc.go b/.gen/go/history/historyservicefx/doc.go index 24dbce5a885..b11403b568f 100644 --- a/.gen/go/history/historyservicefx/doc.go +++ b/.gen/go/history/historyservicefx/doc.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/.gen/go/history/historyservicefx/server.go b/.gen/go/history/historyservicefx/server.go index 5edf9fe3029..38abd819896 100644 --- a/.gen/go/history/historyservicefx/server.go +++ b/.gen/go/history/historyservicefx/server.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/.gen/go/history/historyserviceserver/server.go b/.gen/go/history/historyserviceserver/server.go index 314ea28bf71..1086dc7df19 100644 --- a/.gen/go/history/historyserviceserver/server.go +++ b/.gen/go/history/historyserviceserver/server.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -79,7 +79,7 @@ type Interface interface { ReapplyEvents( ctx context.Context, - ReapplyEventsRequest *shared.ReapplyEventsRequest, + ReapplyEventsRequest *history.ReapplyEventsRequest, ) error RecordActivityTaskHeartbeat( @@ -309,7 +309,7 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { Type: transport.Unary, Unary: thrift.UnaryHandler(h.ReapplyEvents), }, - Signature: "ReapplyEvents(ReapplyEventsRequest *shared.ReapplyEventsRequest)", + Signature: "ReapplyEvents(ReapplyEventsRequest *history.ReapplyEventsRequest)", ThriftModule: history.ThriftModule, }, diff --git a/.gen/go/history/historyservicetest/client.go b/.gen/go/history/historyservicetest/client.go index 1ea54f49eed..cf98c2347f8 100644 --- a/.gen/go/history/historyservicetest/client.go +++ b/.gen/go/history/historyservicetest/client.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -336,7 +336,7 @@ func (mr *_MockClientRecorder) QueryWorkflow( // ... := client.ReapplyEvents(...) func (m *MockClient) ReapplyEvents( ctx context.Context, - _ReapplyEventsRequest *shared.ReapplyEventsRequest, + _ReapplyEventsRequest *history.ReapplyEventsRequest, opts ...yarpc.CallOption, ) (err error) { diff --git a/.gen/go/replicator/replicator.go b/.gen/go/replicator/replicator.go index c46fc14187a..387cf9eafaf 100644 --- a/.gen/go/replicator/replicator.go +++ b/.gen/go/replicator/replicator.go @@ -1,6 +1,6 @@ // The MIT License (MIT) // -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index 27ee57bc567..8a3de950a79 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -1,17 +1,17 @@ // The MIT License (MIT) -// -// Copyright (c) 2017 Uber Technologies, Inc. -// +// +// Copyright (c) 2019 Uber Technologies, Inc. +// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/LICENSE b/LICENSE index e8bcd56af0a..4fe1f5bebfc 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2017 Uber Technologies, Inc. +Copyright (c) 2019 Uber Technologies, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/client/clientBean.go b/client/clientBean.go index e9bd9c65b5e..713b8bf7e6e 100644 --- a/client/clientBean.go +++ b/client/clientBean.go @@ -51,10 +51,15 @@ type ( // Bean in an collection of clients Bean interface { GetHistoryClient() history.Client + SetHistoryClient(client history.Client) GetMatchingClient(domainIDToName DomainIDToNameFunc) (matching.Client, error) + SetMatchingClient(client matching.Client) GetFrontendClient() frontend.Client + SetFrontendClient(client frontend.Client) GetRemoteAdminClient(cluster string) admin.Client + SetRemoteAdminClient(cluster string, client admin.Client) GetRemoteFrontendClient(cluster string) frontend.Client + SetRemoteFrontendClient(cluster string, client frontend.Client) } // DispatcherProvider provides a diapatcher to a given address @@ -146,6 +151,13 @@ func (h *clientBeanImpl) GetHistoryClient() history.Client { return h.historyClient } +func (h *clientBeanImpl) SetHistoryClient( + client history.Client, +) { + + h.historyClient = client +} + func (h *clientBeanImpl) GetMatchingClient(domainIDToName DomainIDToNameFunc) (matching.Client, error) { if client := h.matchingClient.Load(); client != nil { return client.(matching.Client), nil @@ -153,10 +165,24 @@ func (h *clientBeanImpl) GetMatchingClient(domainIDToName DomainIDToNameFunc) (m return h.lazyInitMatchingClient(domainIDToName) } +func (h *clientBeanImpl) SetMatchingClient( + client matching.Client, +) { + + h.matchingClient.Store(client) +} + func (h *clientBeanImpl) GetFrontendClient() frontend.Client { return h.frontendClient } +func (h *clientBeanImpl) SetFrontendClient( + client frontend.Client, +) { + + h.frontendClient = client +} + func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) admin.Client { client, ok := h.remoteAdminClients[cluster] if !ok { @@ -169,6 +195,14 @@ func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) admin.Client { return client } +func (h *clientBeanImpl) SetRemoteAdminClient( + cluster string, + client admin.Client, +) { + + h.remoteAdminClients[cluster] = client +} + func (h *clientBeanImpl) GetRemoteFrontendClient(cluster string) frontend.Client { client, ok := h.remoteFrontendClients[cluster] if !ok { @@ -181,6 +215,14 @@ func (h *clientBeanImpl) GetRemoteFrontendClient(cluster string) frontend.Client return client } +func (h *clientBeanImpl) SetRemoteFrontendClient( + cluster string, + client frontend.Client, +) { + + h.remoteFrontendClients[cluster] = client +} + func (h *clientBeanImpl) lazyInitMatchingClient(domainIDToName DomainIDToNameFunc) (matching.Client, error) { h.Lock() defer h.Unlock() diff --git a/client/clientBean_mock.go b/client/clientBean_mock.go index 5d95b3b37bf..587e2c7ac2e 100644 --- a/client/clientBean_mock.go +++ b/client/clientBean_mock.go @@ -51,6 +51,12 @@ func (_m *MockClientBean) GetHistoryClient() history.Client { return r0 } +// SetHistoryClient provides a mock function with given fields: _a0 +func (_m *MockClientBean) SetHistoryClient( + _a0 history.Client, +) { +} + // GetMatchingClient provides a mock function with given fields: domainIDToName func (_m *MockClientBean) GetMatchingClient(domainIDToName DomainIDToNameFunc) (matching.Client, error) { ret := _m.Called(domainIDToName) @@ -74,6 +80,12 @@ func (_m *MockClientBean) GetMatchingClient(domainIDToName DomainIDToNameFunc) ( return r0, r1 } +// SetMatchingClient provides a mock 
function with given fields: _a0 +func (_m *MockClientBean) SetMatchingClient( + _a0 matching.Client, +) { +} + // GetFrontendClient provides a mock function with given fields: func (_m *MockClientBean) GetFrontendClient() frontend.Client { ret := _m.Called() @@ -90,6 +102,12 @@ func (_m *MockClientBean) GetFrontendClient() frontend.Client { return r0 } +// SetFrontendClient provides a mock function with given fields: _a0 +func (_m *MockClientBean) SetFrontendClient( + _a0 frontend.Client, +) { +} + // GetRemoteAdminClient provides a mock function with given fields: _a0 func (_m *MockClientBean) GetRemoteAdminClient(_a0 string) admin.Client { ret := _m.Called(_a0) @@ -106,6 +124,13 @@ func (_m *MockClientBean) GetRemoteAdminClient(_a0 string) admin.Client { return r0 } +// SetRemoteAdminClient provides a mock function with given fields: _a0, _a1 +func (_m *MockClientBean) SetRemoteAdminClient( + _a0 string, + _a1 admin.Client, +) { +} + // GetRemoteFrontendClient provides a mock function with given fields: _a0 func (_m *MockClientBean) GetRemoteFrontendClient(_a0 string) frontend.Client { ret := _m.Called(_a0) @@ -121,3 +146,10 @@ func (_m *MockClientBean) GetRemoteFrontendClient(_a0 string) frontend.Client { return r0 } + +// SetRemoteFrontendClient provides a mock function with given fields: _a0, _a1 +func (_m *MockClientBean) SetRemoteFrontendClient( + _a0 string, + _a1 frontend.Client, +) { +} diff --git a/client/history/client.go b/client/history/client.go index 5246953d38e..9de6b9180d5 100644 --- a/client/history/client.go +++ b/client/history/client.go @@ -812,10 +812,10 @@ func (c *clientImpl) GetReplicationMessages( func (c *clientImpl) ReapplyEvents( ctx context.Context, - request *workflow.ReapplyEventsRequest, + request *h.ReapplyEventsRequest, opts ...yarpc.CallOption, ) error { - client, err := c.getClientForWorkflowID(request.GetWorkflowExecution().GetWorkflowId()) + client, err := c.getClientForWorkflowID(request.GetRequest().GetWorkflowExecution().GetWorkflowId()) if err != nil { return err } diff --git a/client/history/metricClient.go b/client/history/metricClient.go index 1b8d06ee2ca..d6ba6a034bf 100644 --- a/client/history/metricClient.go +++ b/client/history/metricClient.go @@ -561,7 +561,7 @@ func (c *metricClient) QueryWorkflow( func (c *metricClient) ReapplyEvents( ctx context.Context, - request *shared.ReapplyEventsRequest, + request *h.ReapplyEventsRequest, opts ...yarpc.CallOption, ) error { diff --git a/client/history/retryableClient.go b/client/history/retryableClient.go index 7fc2a953837..084bc4686c1 100644 --- a/client/history/retryableClient.go +++ b/client/history/retryableClient.go @@ -498,7 +498,7 @@ func (c *retryableClient) QueryWorkflow( func (c *retryableClient) ReapplyEvents( ctx context.Context, - request *shared.ReapplyEventsRequest, + request *h.ReapplyEventsRequest, opts ...yarpc.CallOption, ) error { diff --git a/common/metrics/defs.go b/common/metrics/defs.go index c4e2a2ab9d7..f47faf52c2c 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -710,6 +710,8 @@ const ( HistoryGetReplicationMessagesScope // HistoryShardControllerScope is the scope used by shard controller HistoryShardControllerScope + // HistoryReapplyEventsScope is the scope used by event reapplication + HistoryReapplyEventsScope // TransferQueueProcessorScope is the scope used by all metric emitted by transfer queue processor TransferQueueProcessorScope // TransferActiveQueueProcessorScope is the scope used by all metric emitted by transfer queue processor @@ 
-1205,6 +1207,7 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ HistoryDescribeMutableStateScope: {operation: "DescribeMutableState"}, HistoryGetReplicationMessagesScope: {operation: "GetReplicationMessages"}, HistoryShardControllerScope: {operation: "ShardController"}, + HistoryReapplyEventsScope: {operation: "EventReapplication"}, TransferQueueProcessorScope: {operation: "TransferQueueProcessor"}, TransferActiveQueueProcessorScope: {operation: "TransferActiveQueueProcessor"}, TransferStandbyQueueProcessorScope: {operation: "TransferStandbyQueueProcessor"}, @@ -1518,6 +1521,7 @@ const ( GetReplicationMessagesForShardLatency ArchiveVisibilityAttemptCount ArchiveVisibilityFailedCount + EventReapplySkippedCount NumHistoryMetrics ) @@ -1813,6 +1817,7 @@ var MetricDefs = map[ServiceIdx]map[int]metricDefinition{ GetReplicationMessagesForShardLatency: {metricName: "get_replication_messages_for_shard", metricType: Timer}, ArchiveVisibilityAttemptCount: {metricName: "archive_visibility_attempt_count", metricType: Counter}, ArchiveVisibilityFailedCount: {metricName: "archive_visibility_failed_count", metricType: Counter}, + EventReapplySkippedCount: {metricName: "event_reapply_skipped_count", metricType: Counter}, }, Matching: { PollSuccessCounter: {metricName: "poll_success"}, diff --git a/common/service/service.go b/common/service/service.go index bde5196b0e8..ca940fc8c09 100644 --- a/common/service/service.go +++ b/common/service/service.go @@ -39,6 +39,7 @@ import ( "github.com/uber/cadence/common/membership" "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/service/config" "github.com/uber/cadence/common/service/dynamicconfig" "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" @@ -113,6 +114,7 @@ type ( dispatcherProvider client.DispatcherProvider archivalMetadata archiver.ArchivalMetadata archiverProvider provider.ArchiverProvider + serializer persistence.PayloadSerializer } ) @@ -139,6 +141,7 @@ func New(params *BootstrapParams) Service { dynamicCollection: dynamicconfig.NewCollection(params.DynamicConfig, params.Logger), archivalMetadata: params.ArchivalMetadata, archiverProvider: params.ArchiverProvider, + serializer: persistence.NewPayloadSerializer(), } sVice.runtimeMetricsReporter = metrics.NewRuntimeMetricsReporter(params.MetricScope, time.Minute, sVice.GetLogger(), params.InstanceID) @@ -284,6 +287,10 @@ func (h *serviceImpl) GetArchiverProvider() provider.ArchiverProvider { return h.archiverProvider } +func (h *serviceImpl) GetPayloadSerializer() persistence.PayloadSerializer { + return h.serializer +} + // GetMetricsServiceIdx returns the metrics name func GetMetricsServiceIdx(serviceName string, logger log.Logger) metrics.ServiceIdx { switch serviceName { diff --git a/common/service/serviceTestBase.go b/common/service/serviceTestBase.go index 63239a02545..430d3844089 100644 --- a/common/service/serviceTestBase.go +++ b/common/service/serviceTestBase.go @@ -31,6 +31,8 @@ import ( "github.com/uber/cadence/common/membership" "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/persistence" + "go.uber.org/yarpc" "go.uber.org/zap" ) @@ -47,6 +49,7 @@ type ( membershipMonitor membership.Monitor archivalMetadata archiver.ArchivalMetadata archiverProvider provider.ArchiverProvider + serializer persistence.PayloadSerializer metrics metrics.Client logger log.Logger @@ -71,6 +74,7 @@ func 
NewTestService( clientBean client.Bean, archivalMetadata archiver.ArchivalMetadata, archiverProvider provider.ArchiverProvider, + serializer persistence.PayloadSerializer, ) Service { zapLogger, err := zap.NewDevelopment() @@ -89,6 +93,7 @@ func NewTestService( logger: logger, archivalMetadata: archivalMetadata, archiverProvider: archiverProvider, + serializer: serializer, } } @@ -162,3 +167,8 @@ func (s *serviceTestBase) GetArchivalMetadata() archiver.ArchivalMetadata { func (s *serviceTestBase) GetArchiverProvider() provider.ArchiverProvider { return s.archiverProvider } + +// GetPayloadSerializer returns the payload serializer used by the service +func (s *serviceTestBase) GetPayloadSerializer() persistence.PayloadSerializer { + return s.serializer +} diff --git a/common/service/serviceinterfaces.go b/common/service/serviceinterfaces.go index 3ec7fe4fd93..924408fba64 100644 --- a/common/service/serviceinterfaces.go +++ b/common/service/serviceinterfaces.go @@ -30,6 +30,8 @@ import ( "github.com/uber/cadence/common/membership" "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/persistence" + "go.uber.org/yarpc" ) @@ -70,5 +72,7 @@ type ( GetArchivalMetadata() archiver.ArchivalMetadata GetArchiverProvider() provider.ArchiverProvider + + GetPayloadSerializer() persistence.PayloadSerializer } ) diff --git a/host/ndc/nDC_integration_test.go b/host/ndc/nDC_integration_test.go index 1072159c2d2..c6fd9a78895 100644 --- a/host/ndc/nDC_integration_test.go +++ b/host/ndc/nDC_integration_test.go @@ -29,9 +29,13 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/uber/cadence/.gen/go/cadence/workflowservicetest" + "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/client/frontend" "go.uber.org/zap" "gopkg.in/yaml.v2" @@ -60,9 +64,10 @@ type ( serializer persistence.PayloadSerializer logger log.Logger - domainName string - domainID string - version int64 + domainName string + domainID string + version int64 + mockFrontendClient map[string]frontend.Client } ) @@ -103,6 +108,20 @@ func (s *nDCIntegrationTestSuite) SetupSuite() { clusterConfigs[0].WorkerConfig = &host.WorkerConfig{} clusterConfigs[1].WorkerConfig = &host.WorkerConfig{} + s.mockFrontendClient = make(map[string]frontend.Client) + controller := gomock.NewController(s.T()) + mockStandbyClient := workflowservicetest.NewMockClient(controller) + mockStandbyClient.EXPECT().GetReplicationMessages(gomock.Any(), gomock.Any()).Return(&replicator.GetReplicationMessagesResponse{ + MessagesByShard: make(map[int32]*replicator.ReplicationMessages), + }, nil).AnyTimes() + mockOtherClient := workflowservicetest.NewMockClient(controller) + mockOtherClient.EXPECT().GetReplicationMessages(gomock.Any(), gomock.Any()).Return(&replicator.GetReplicationMessagesResponse{ + MessagesByShard: make(map[int32]*replicator.ReplicationMessages), + }, nil).AnyTimes() + s.mockFrontendClient["standby"] = mockStandbyClient + s.mockFrontendClient["other"] = mockOtherClient + clusterConfigs[0].MockFrontendClient = s.mockFrontendClient + cluster, err := host.NewCluster(clusterConfigs[0], s.logger.WithTags(tag.ClusterName(clusterName[0]))) s.Require().NoError(err) s.active = cluster @@ -128,6 +147,7 @@ func (s *nDCIntegrationTestSuite) TearDownSuite() { func (s *nDCIntegrationTestSuite) TestSingleBranch() { + s.setupRemoteFrontendClients() workflowID := 
"ndc-single-branch-test" + uuid.New() workflowType := "event-generator-workflow-type" @@ -141,7 +161,6 @@ func (s *nDCIntegrationTestSuite) TestSingleBranch() { runID := uuid.New() historyBatch := []*shared.History{} s.generator = test.InitializeHistoryEventGenerator(s.domainName, version) - s.generator.SetVersion(version) for s.generator.HasNextVertex() { events := s.generator.GetNextVertices() @@ -199,6 +218,8 @@ func (s *nDCIntegrationTestSuite) TestSingleBranch() { } func (s *nDCIntegrationTestSuite) TestMultipleBranches() { + + s.setupRemoteFrontendClients() workflowID := "ndc-multiple-branches-test" + uuid.New() workflowType := "event-generator-workflow-type" @@ -283,6 +304,8 @@ func (s *nDCIntegrationTestSuite) TestMultipleBranches() { } func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { + + s.setupRemoteFrontendClients() workflowID := "ndc-handcrafted-multiple-branches-test" + uuid.New() runID := uuid.New() @@ -294,7 +317,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { historyClient := s.active.GetHistoryClient() eventsBatch1 := []*shared.History{ - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(1), Version: common.Int64Ptr(21), @@ -319,7 +342,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(3), Version: common.Int64Ptr(21), @@ -331,7 +354,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(4), Version: common.Int64Ptr(21), @@ -369,7 +392,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(7), Version: common.Int64Ptr(21), @@ -382,7 +405,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(8), Version: common.Int64Ptr(21), @@ -404,7 +427,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(10), Version: common.Int64Ptr(21), @@ -416,7 +439,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(11), Version: common.Int64Ptr(21), @@ -461,7 +484,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { } eventsBatch2 := []*shared.History{ - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(15), Version: common.Int64Ptr(31), @@ -474,7 +497,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { } eventsBatch3 := []*shared.History{ - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(15), Version: common.Int64Ptr(30), @@ -506,7 +529,7 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(18), Version: common.Int64Ptr(30), @@ -518,7 +541,7 
@@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { }, }, }}, - &shared.History{Events: []*shared.HistoryEvent{ + {Events: []*shared.HistoryEvent{ { EventId: common.Int64Ptr(19), Version: common.Int64Ptr(30), @@ -585,6 +608,161 @@ func (s *nDCIntegrationTestSuite) TestHandcraftedMultipleBranches() { ) } +func (s *nDCIntegrationTestSuite) TestEventsReapply_ZombieWorkflow() { + + workflowID := "ndc-single-branch-test" + uuid.New() + + workflowType := "event-generator-workflow-type" + tasklist := "event-generator-taskList" + + // active has initial version 0 + historyClient := s.active.GetHistoryClient() + + version := int64(101) + runID := uuid.New() + historyBatch := []*shared.History{} + s.generator = test.InitializeHistoryEventGenerator(s.domainName, version) + + for s.generator.HasNextVertex() { + events := s.generator.GetNextVertices() + historyEvents := &shared.History{} + for _, event := range events { + historyEvents.Events = append(historyEvents.Events, event.GetData().(*shared.HistoryEvent)) + } + historyBatch = append(historyBatch, historyEvents) + } + + versionHistory := s.eventBatchesToVersionHistory(nil, historyBatch) + s.applyEvents( + workflowID, + runID, + workflowType, + tasklist, + versionHistory, + historyBatch, + historyClient, + ) + + version = int64(1) + runID = uuid.New() + historyBatch = []*shared.History{} + s.generator = test.InitializeHistoryEventGenerator(s.domainName, version) + + // verify two batches of zombie workflow are call reapply API + s.mockFrontendClient["standby"].(*workflowservicetest.MockClient).EXPECT().ReapplyEvents(gomock.Any(), gomock.Any()).Return(nil).Times(2) + for i := 0; i < 2 && s.generator.HasNextVertex(); i++ { + events := s.generator.GetNextVertices() + historyEvents := &shared.History{} + for _, event := range events { + historyEvents.Events = append(historyEvents.Events, event.GetData().(*shared.HistoryEvent)) + } + historyBatch = append(historyBatch, historyEvents) + } + + versionHistory = s.eventBatchesToVersionHistory(nil, historyBatch) + s.applyEvents( + workflowID, + runID, + workflowType, + tasklist, + versionHistory, + historyBatch, + historyClient, + ) +} + +func (s *nDCIntegrationTestSuite) TestEventsReapply_UpdateNonCurrentBranch() { + workflowID := "ndc-single-branch-test" + uuid.New() + runID := uuid.New() + workflowType := "event-generator-workflow-type" + tasklist := "event-generator-taskList" + version := int64(101) + + historyClient := s.active.GetHistoryClient() + + s.generator = test.InitializeHistoryEventGenerator(s.domainName, version) + currentBranch := []*shared.History{} + var taskID int64 + for i := 0; i < 10 && s.generator.HasNextVertex(); i++ { + events := s.generator.GetNextVertices() + historyEvents := &shared.History{} + for _, event := range events { + history := event.GetData().(*shared.HistoryEvent) + taskID = history.GetTaskId() + historyEvents.Events = append(historyEvents.Events, history) + } + currentBranch = append(currentBranch, historyEvents) + } + versionHistory := s.eventBatchesToVersionHistory(nil, currentBranch) + s.applyEvents( + workflowID, + runID, + workflowType, + tasklist, + versionHistory, + currentBranch, + historyClient, + ) + + newGenerator := s.generator.DeepCopy() + newGenerator.SetVersion(int64(102)) + newBranch := []*shared.History{} + newVersionHistory := versionHistory.Duplicate() + for i := 0; i < 10 && newGenerator.HasNextVertex(); i++ { + events := newGenerator.GetNextVertices() + historyEvents := &shared.History{} + for _, event := range events { + 
history := event.GetData().(*shared.HistoryEvent) + taskID = history.GetTaskId() + historyEvents.Events = append(historyEvents.Events, history) + } + newBranch = append(newBranch, historyEvents) + } + newVersionHistory = s.eventBatchesToVersionHistory(newVersionHistory, newBranch) + s.applyEvents( + workflowID, + runID, + workflowType, + tasklist, + newVersionHistory, + newBranch, + historyClient, + ) + + s.mockFrontendClient["standby"].(*workflowservicetest.MockClient).EXPECT().ReapplyEvents(gomock.Any(), gomock.Any()).Return(nil).Times(1) + // Handcraft a stale signal event + currentEvents := currentBranch[len(currentBranch)-1].GetEvents() + staleEventID := currentEvents[len(currentEvents)-1].GetEventId() + 1 + staleBranch := []*shared.History{ + { + Events: []*shared.HistoryEvent{ + { + EventId: common.Int64Ptr(staleEventID), + EventType: common.EventTypePtr(shared.EventTypeWorkflowExecutionSignaled), + Timestamp: common.Int64Ptr(time.Now().UnixNano()), + Version: common.Int64Ptr(101), + TaskId: common.Int64Ptr(taskID), + WorkflowExecutionSignaledEventAttributes: &shared.WorkflowExecutionSignaledEventAttributes{ + SignalName: common.StringPtr("signal"), + Input: []byte{}, + Identity: common.StringPtr("ndc_integration_test"), + }, + }, + }, + }, + } + staleVersionHistory := s.eventBatchesToVersionHistory(nil, staleBranch) + s.applyEvents( + workflowID, + runID, + workflowType, + tasklist, + staleVersionHistory, + staleBranch, + historyClient, + ) +} + func (s *nDCIntegrationTestSuite) registerDomain() { s.domainName = "test-simple-workflow-ndc-" + common.GenerateRandomString(5) client1 := s.active.GetFrontendClient() // active @@ -770,3 +948,8 @@ func (s *nDCIntegrationTestSuite) createContext() context.Context { ctx, _ := context.WithTimeout(context.Background(), 90*time.Second) return ctx } + +func (s *nDCIntegrationTestSuite) setupRemoteFrontendClients() { + s.mockFrontendClient["standby"].(*workflowservicetest.MockClient).EXPECT().ReapplyEvents(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.mockFrontendClient["other"].(*workflowservicetest.MockClient).EXPECT().ReapplyEvents(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() +} diff --git a/host/onebox.go b/host/onebox.go index 553257fabf6..18dca24e06f 100644 --- a/host/onebox.go +++ b/host/onebox.go @@ -29,11 +29,13 @@ import ( "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/uber-go/tally" + "github.com/uber/cadence/.gen/go/admin/adminserviceclient" "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" "github.com/uber/cadence/.gen/go/history/historyserviceclient" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/client" + frontendclient "github.com/uber/cadence/client/frontend" "github.com/uber/cadence/common" carchiver "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/archiver/provider" @@ -108,6 +110,7 @@ type ( esConfig *elasticsearch.Config esClient elasticsearch.Client workerConfig *WorkerConfig + mockFrontendClient map[string]frontendclient.Client } // HistoryConfig contains configs for history service @@ -142,6 +145,7 @@ type ( ESConfig *elasticsearch.Config ESClient elasticsearch.Client WorkerConfig *WorkerConfig + MockFrontendClient map[string]frontendclient.Client } membershipFactoryImpl struct { @@ -175,6 +179,7 @@ func NewCadence(params *CadenceParams) Cadence { archiverProvider: params.ArchiverProvider, historyConfig: params.HistoryConfig, workerConfig: params.WorkerConfig, + mockFrontendClient: params.MockFrontendClient, } } @@ -462,6 
+467,15 @@ func (c *cadenceImpl) startFrontend(hosts map[string][]string, startWG *sync.Wai // must start base service first c.frontEndService.Start() + if c.mockFrontendClient != nil { + clientBean := c.frontEndService.GetClientBean() + if clientBean != nil { + for serviceName, client := range c.mockFrontendClient { + clientBean.SetRemoteFrontendClient(serviceName, client) + } + } + } + err = c.adminHandler.Start() if err != nil { c.logger.Fatal("Failed to start admin", tag.Error(err)) @@ -547,6 +561,15 @@ func (c *cadenceImpl) startHistory(hosts map[string][]string, startWG *sync.Wait handler.RegisterHandler() service.Start() + if c.mockFrontendClient != nil { + clientBean := service.GetClientBean() + if clientBean != nil { + for serviceName, client := range c.mockFrontendClient { + clientBean.SetRemoteFrontendClient(serviceName, client) + } + } + } + err = handler.Start() if err != nil { c.logger.Fatal("Failed to start history", tag.Error(err)) @@ -582,6 +605,15 @@ func (c *cadenceImpl) startMatching(hosts map[string][]string, startWG *sync.Wai c.matchingHandler.RegisterHandler() service.Start() + if c.mockFrontendClient != nil { + clientBean := service.GetClientBean() + if clientBean != nil { + for serviceName, client := range c.mockFrontendClient { + clientBean.SetRemoteFrontendClient(serviceName, client) + } + } + } + err := c.matchingHandler.Start() if err != nil { c.logger.Fatal("Failed to start history", tag.Error(err)) diff --git a/host/testcluster.go b/host/testcluster.go index ad371131499..dd73b89a6be 100644 --- a/host/testcluster.go +++ b/host/testcluster.go @@ -26,6 +26,7 @@ import ( "github.com/uber-go/tally" "github.com/uber/cadence/client" + frontendclient "github.com/uber/cadence/client/frontend" "github.com/uber/cadence/common" "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/archiver/filestore" @@ -76,6 +77,7 @@ type ( HistoryConfig *HistoryConfig ESConfig *elasticsearch.Config WorkerConfig *WorkerConfig + MockFrontendClient map[string]frontendclient.Client } // MessagingClientConfig is the config for messaging config @@ -171,6 +173,7 @@ func NewCluster(options *TestClusterConfig, logger log.Logger) (*TestCluster, er ArchiverProvider: archiverBase.provider, HistoryConfig: options.HistoryConfig, WorkerConfig: options.WorkerConfig, + MockFrontendClient: options.MockFrontendClient, } cluster := NewCadence(cadenceParams) if err := cluster.Start(); err != nil { diff --git a/host/testdata/ndc_integration_test_clusters.yaml b/host/testdata/ndc_integration_test_clusters.yaml index 981f99fc3b3..0321bce240f 100644 --- a/host/testdata/ndc_integration_test_clusters.yaml +++ b/host/testdata/ndc_integration_test_clusters.yaml @@ -38,7 +38,7 @@ enablereplicator: true enableindexer: false enableeventsv2: false - clusterno: 1 + clusterno: 0 historyconfig: numhistoryshards: 1 numhistoryhosts: 1 @@ -188,7 +188,7 @@ enablereplicator: true enableindexer: false enableeventsv2: false - clusterno: 1 + clusterno: 2 historyconfig: numhistoryshards: 1 numhistoryhosts: 1 diff --git a/idl/github.com/uber/cadence/history.thrift b/idl/github.com/uber/cadence/history.thrift index 1fb86f1a252..6524ce487dd 100644 --- a/idl/github.com/uber/cadence/history.thrift +++ b/idl/github.com/uber/cadence/history.thrift @@ -343,6 +343,11 @@ struct QueryWorkflowResponse { 10: optional binary queryResult } +struct ReapplyEventsRequest { + 10: optional string domainUUID + 20: optional shared.ReapplyEventsRequest request +} + /** * HistoryService provides API to start a new long running 
workflow instance, as well as query and update the history * of workflow instances already created. @@ -825,7 +830,7 @@ service HistoryService { /** * ReapplyEvents applies stale events to the current workflow and current run **/ - void ReapplyEvents(1: shared.ReapplyEventsRequest reapplyEventsRequest) + void ReapplyEvents(1: ReapplyEventsRequest reapplyEventsRequest) throws ( 1: shared.BadRequestError badRequestError, 2: shared.InternalServiceError internalServiceError, diff --git a/service/frontend/dcRedirectionHandler_test.go b/service/frontend/dcRedirectionHandler_test.go index 39a6d60855a..14b60e5e44f 100644 --- a/service/frontend/dcRedirectionHandler_test.go +++ b/service/frontend/dcRedirectionHandler_test.go @@ -104,7 +104,7 @@ func (s *dcRedirectionHandlerSuite) SetupTest() { s.mockArchivalMetadata = &archiver.MockArchivalMetadata{} s.mockArchiverProvider = &provider.MockArchiverProvider{} s.mockClientBean.On("GetRemoteFrontendClient", s.alternativeClusterName).Return(s.mockRemoteFrontendClient) - s.service = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + s.service = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) s.domainCache = cache.NewDomainCache(s.mockMetadataMgr, s.service.GetClusterMetadata(), s.service.GetMetricsClient(), s.service.GetLogger()) frontendHandler := NewWorkflowHandler(s.service, s.config, s.mockMetadataMgr, nil, nil, nil, nil, s.domainCache) diff --git a/service/frontend/workflowHandler.go b/service/frontend/workflowHandler.go index 6724412d857..ddda81b5e10 100644 --- a/service/frontend/workflowHandler.go +++ b/service/frontend/workflowHandler.go @@ -3487,8 +3487,27 @@ func (wh *WorkflowHandler) ReapplyEvents( if request == nil { return wh.error(errRequestNotSet, scope) } + if request.DomainName == nil || request.GetDomainName() == "" { + return wh.error(errDomainNotSet, scope) + } + if request.WorkflowExecution == nil { + return wh.error(errExecutionNotSet, scope) + } + if request.GetWorkflowExecution().GetWorkflowId() == "" { + return wh.error(errWorkflowIDNotSet, scope) + } + if request.GetEvents() == nil { + return wh.error(errWorkflowIDNotSet, scope) + } + domainEntry, err := wh.domainCache.GetDomain(request.GetDomainName()) + if err != nil { + return wh.error(err, scope) + } - err = wh.history.ReapplyEvents(ctx, request) + err = wh.history.ReapplyEvents(ctx, &h.ReapplyEventsRequest{ + DomainUUID: common.StringPtr(domainEntry.GetInfo().ID), + Request: request, + }) if err != nil { return wh.error(err, scope) } diff --git a/service/frontend/workflowHandler_test.go b/service/frontend/workflowHandler_test.go index ebf58bcdc95..8cd97171123 100644 --- a/service/frontend/workflowHandler_test.go +++ b/service/frontend/workflowHandler_test.go @@ -122,6 +122,7 @@ func (s *workflowHandlerSuite) SetupTest() { s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, + nil, ) } @@ -517,7 +518,7 @@ func (s *workflowHandlerSuite) TestRegisterDomain_Failure_InvalidArchivalURI() { s.mockArchiverProvider.On("GetHistoryArchiver", mock.Anything, mock.Anything).Return(mHistoryArchiver, nil) s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := 
cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -552,7 +553,7 @@ func (s *workflowHandlerSuite) TestRegisterDomain_Success_EnabledWithNoArchivalU s.mockArchiverProvider.On("GetHistoryArchiver", mock.Anything, mock.Anything).Return(mHistoryArchiver, nil) s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -582,7 +583,7 @@ func (s *workflowHandlerSuite) TestRegisterDomain_Success_EnabledWithArchivalURI s.mockArchiverProvider.On("GetHistoryArchiver", mock.Anything, mock.Anything).Return(mHistoryArchiver, nil) s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -611,7 +612,7 @@ func (s *workflowHandlerSuite) TestRegisterDomain_Success_ClusterNotConfiguredFo ID: "test-id", }, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -640,7 +641,7 @@ func (s *workflowHandlerSuite) TestRegisterDomain_Success_NotEnabled() { ID: "test-id", }, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -658,7 +659,7 @@ func (s *workflowHandlerSuite) TestDescribeDomain_Success_ArchivalDisabled() { &archivalState{status: shared.ArchivalStatusDisabled, URI: ""}, ) mMetadataManager.On("GetDomain", mock.Anything).Return(getDomainResp, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, 
s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -685,7 +686,7 @@ func (s *workflowHandlerSuite) TestDescribeDomain_Success_ArchivalEnabled() { &archivalState{status: shared.ArchivalStatusEnabled, URI: testVisibilityArchivalURI}, ) mMetadataManager.On("GetDomain", mock.Anything).Return(getDomainResp, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -724,7 +725,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Failure_UpdateExistingArchivalUR mHistoryArchiver.On("ValidateURI", mock.Anything).Return(nil) s.mockArchiverProvider.On("GetHistoryArchiver", mock.Anything, mock.Anything).Return(mHistoryArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -758,7 +759,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Failure_InvalidArchivalURI() { mHistoryArchiver := &archiver.HistoryArchiverMock{} mHistoryArchiver.On("ValidateURI", mock.Anything).Return(errors.New("invalid URI")) s.mockArchiverProvider.On("GetHistoryArchiver", mock.Anything, mock.Anything).Return(mHistoryArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -790,7 +791,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Success_ArchivalEnabledToArchiva s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetHistoryConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mHistoryArchiver := &archiver.HistoryArchiverMock{} mHistoryArchiver.On("ValidateURI", mock.Anything).Return(nil) 
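// NOTE (illustrative sketch, not part of the patch): the workflowHandler.go hunk above
// validates a frontend ReapplyEvents request and forwards it to the history service with
// the resolved domain UUID. A caller-side sketch of that API is shown below. The field
// names on shared.ReapplyEventsRequest, the shared.DataBlob layout, and the
// SerializeBatchEvents helper are inferred from the handler code in this patch and should
// be treated as assumptions, not confirmed signatures.
func reapplyStaleEvents(
	ctx context.Context,
	frontend workflowserviceclient.Interface,
	domainName string,
	workflowID string,
	events []*shared.HistoryEvent,
) error {
	// serialize the stale events the same way the history handler deserializes them
	blob, err := persistence.NewPayloadSerializer().SerializeBatchEvents(events, common.EncodingTypeThriftRW)
	if err != nil {
		return err
	}
	return frontend.ReapplyEvents(ctx, &shared.ReapplyEventsRequest{
		DomainName: common.StringPtr(domainName),
		WorkflowExecution: &shared.WorkflowExecution{
			// run ID is omitted on purpose: history reapplies events to the current run
			WorkflowId: common.StringPtr(workflowID),
		},
		Events: &shared.DataBlob{
			EncodingType: shared.EncodingTypeThriftRW.Ptr(),
			Data:         blob.Data,
		},
	})
}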
mVisibilityArchiver := &archiver.VisibilityArchiverMock{} @@ -834,7 +835,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Success_ClusterNotConfiguredForA s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetHistoryConfig").Return(archiver.NewDisabledArchvialConfig()) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewDisabledArchvialConfig()) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -867,7 +868,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Success_ArchivalEnabledToArchiva s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetHistoryConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mHistoryArchiver := &archiver.HistoryArchiverMock{} mHistoryArchiver.On("ValidateURI", mock.Anything).Return(nil) mVisibilityArchiver := &archiver.VisibilityArchiverMock{} @@ -911,7 +912,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Success_ArchivalEnabledToEnabled s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetHistoryConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mHistoryArchiver := &archiver.HistoryArchiverMock{} mHistoryArchiver.On("ValidateURI", mock.Anything).Return(nil) mVisibilityArchiver := &archiver.VisibilityArchiverMock{} @@ -955,7 +956,7 @@ func (s *workflowHandlerSuite) TestUpdateDomain_Success_ArchivalNeverEnabledToEn s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetHistoryConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "some random URI")) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), 
dc.GetBoolPropertyFn(true), "disabled", "some random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mHistoryArchiver := &archiver.HistoryArchiverMock{} mHistoryArchiver.On("ValidateURI", mock.Anything).Return(nil) mVisibilityArchiver := &archiver.VisibilityArchiverMock{} @@ -1053,7 +1054,7 @@ func (s *workflowHandlerSuite) TestGetArchivedHistory_Failure_ArchivalURIEmpty() s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(false) s.mockClusterMetadata.On("GetAllClusterInfo").Return(cluster.TestAllClusterInfo) s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -1073,7 +1074,7 @@ func (s *workflowHandlerSuite) TestGetArchivedHistory_Failure_InvalidURI() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(false) s.mockClusterMetadata.On("GetAllClusterInfo").Return(cluster.TestAllClusterInfo) s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -1093,7 +1094,7 @@ func (s *workflowHandlerSuite) TestGetArchivedHistory_Success_GetFirstPage() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(false) s.mockClusterMetadata.On("GetAllClusterInfo").Return(cluster.TestAllClusterInfo) s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mHistoryArchiver := &archiver.HistoryArchiverMock{} nextPageToken := []byte{'1', '2', '3'} historyBatch1 := &gen.History{ @@ -1158,7 +1159,7 @@ func (s *workflowHandlerSuite) TestGetHistory() { Size: 1, LastFirstEventID: nextEventID, }, nil).Once() - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) mMetadataManager := &mocks.MetadataManager{} wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) 
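// NOTE (illustrative sketch, not part of the patch): every NewTestService call site in this
// patch gains one extra trailing argument. Judging from historyTestBase.go further below,
// which passes persistence.NewPayloadSerializer(), the new parameter is a payload
// serializer; suites that never touch serialized events pass nil. Assuming that signature,
// a minimal history-side test setup looks like this:
func newTestServiceForSuite(
	clusterMetadata cluster.Metadata,
	clientBean client.Bean,
) service.Service {
	metricsClient := metrics.NewClient(tally.NoopScope, metrics.History)
	return service.NewTestService(
		clusterMetadata,                    // cluster metadata mock
		nil,                                // messaging client
		metricsClient,                      // metrics client
		clientBean,                         // client bean exposing remote frontend clients
		nil,                                // archival metadata
		nil,                                // archiver provider
		persistence.NewPayloadSerializer(), // new trailing payload-serializer parameter
	)
}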
wh.metricsClient = wh.Service.GetMetricsClient() @@ -1182,7 +1183,7 @@ func (s *workflowHandlerSuite) TestListArchivedVisibility_Failure_InvalidRequest func (s *workflowHandlerSuite) TestListArchivedVisibility_Failure_ClusterNotConfiguredForArchival() { config := s.newConfig() s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewDisabledArchvialConfig()) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, &mocks.MetadataManager{}) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -1216,7 +1217,7 @@ func (s *workflowHandlerSuite) TestListArchivedVisibility_Failure_DomainNotConfi s.mockClusterMetadata.On("GetAllClusterInfo").Return(cluster.TestAllClusterInfo) s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -1238,7 +1239,7 @@ func (s *workflowHandlerSuite) TestListArchivedVisibility_Failure_InvalidURI() { s.mockClusterMetadata.On("GetAllClusterInfo").Return(cluster.TestAllClusterInfo) s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = wh.Service.GetMetricsClient() wh.startWG.Done() @@ -1262,7 +1263,7 @@ func (s *workflowHandlerSuite) TestListArchivedVisibility_Success() { mVisibilityArchiver := &archiver.VisibilityArchiverMock{} mVisibilityArchiver.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(&archiver.QueryVisibilityResponse{}, nil) s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) - mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider) + mService := cs.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, s.mockArchivalMetadata, s.mockArchiverProvider, nil) wh := s.getWorkflowHandlerWithParams(mService, config, mMetadataManager) wh.metricsClient = 
wh.Service.GetMetricsClient() wh.startWG.Done() diff --git a/service/history/MockHistoryEngine.go b/service/history/MockHistoryEngine.go index f6e40b406b2..decfeb6c586 100644 --- a/service/history/MockHistoryEngine.go +++ b/service/history/MockHistoryEngine.go @@ -599,3 +599,17 @@ func (_m *MockHistoryEngine) GetReplicationMessages(ctx context.Context, taskID return r0, r1 } + +// ReapplyEvents is mock implementation for ReapplyEvents of HistoryEngine +func (_m *MockHistoryEngine) ReapplyEvents(ctx context.Context, domainUUID string, workflowID string, events []*shared.HistoryEvent) error { + ret := _m.Called(domainUUID, workflowID, events) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, []*shared.HistoryEvent) error); ok { + r0 = rf(domainUUID, workflowID, events) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/service/history/MockWorkflowExecutionContext.go b/service/history/MockWorkflowExecutionContext.go index f829d4dc06a..8d99355f8bb 100644 --- a/service/history/MockWorkflowExecutionContext.go +++ b/service/history/MockWorkflowExecutionContext.go @@ -337,3 +337,16 @@ func (_m *mockWorkflowExecutionContext) getQueryRegistry() QueryRegistry { return r0 } + +func (_m *mockWorkflowExecutionContext) reapplyEvents(_a0 context.Context, _a1 string, _a2 string, _a3 []*workflow.HistoryEvent) error { + ret := _m.Called(_a0, _a1, _a2, _a3) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, []*workflow.HistoryEvent) error); ok { + r0 = rf(_a0, _a1, _a2, _a3) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/service/history/conflictResolver_test.go b/service/history/conflictResolver_test.go index 3c21429f8c8..1c380e631e0 100644 --- a/service/history/conflictResolver_test.go +++ b/service/history/conflictResolver_test.go @@ -97,7 +97,14 @@ func (s *conflictResolverSuite) SetupTest() { s.mockMetadataMgr = &mocks.MetadataManager{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil) + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + metricsClient, + s.mockClientBean, + nil, + nil, + nil) s.mockDomainCache = &cache.DomainCacheMock{} s.mockEventsCache = &MockEventsCache{} diff --git a/service/history/handler.go b/service/history/handler.go index f31bfb13787..0d7c2805f37 100644 --- a/service/history/handler.go +++ b/service/history/handler.go @@ -1542,9 +1542,40 @@ func (h *Handler) GetReplicationMessages( // ReapplyEvents applies stale events to the current workflow and the current run func (h *Handler) ReapplyEvents( ctx context.Context, - request *gen.ReapplyEventsRequest, -) error { - return &gen.BadRequestError{Message: "This API is not implemented yet"} + request *hist.ReapplyEventsRequest, +) (retError error) { + + defer log.CapturePanic(h.GetLogger(), &retError) + h.startWG.Wait() + + scope := metrics.HistoryReapplyEventsScope + h.metricsClient.IncCounter(scope, metrics.CadenceRequests) + sw := h.metricsClient.StartTimer(scope, metrics.CadenceLatency) + defer sw.Stop() + + domainID := request.GetDomainUUID() + workflowID := request.GetRequest().GetWorkflowExecution().GetWorkflowId() + engine, err := h.controller.GetEngine(workflowID) + if err != nil { + return h.error(err, scope, domainID, workflowID) + } + // deserialize history event object + historyEvents, err := 
h.GetPayloadSerializer().DeserializeBatchEvents(&persistence.DataBlob{ + Encoding: common.EncodingTypeThriftRW, + Data: request.GetRequest().GetEvents().GetData(), + }) + if err != nil { + return h.error(err, scope, domainID, workflowID) + } + if err := engine.ReapplyEvents( + ctx, + request.GetDomainUUID(), + request.GetRequest().GetWorkflowExecution().GetWorkflowId(), + historyEvents, + ); err != nil { + return h.error(err, scope, domainID, workflowID) + } + return nil } // convertError is a helper method to convert ShardOwnershipLostError from persistence layer returned by various diff --git a/service/history/historyCache_test.go b/service/history/historyCache_test.go index 0c16e501e88..84dcad09fcd 100644 --- a/service/history/historyCache_test.go +++ b/service/history/historyCache_test.go @@ -84,7 +84,14 @@ func (s *historyCacheSuite) SetupTest() { s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil) + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + metricsClient, + s.mockClientBean, + nil, + nil, + nil) s.mockShard = &shardContextImpl{ service: s.mockService, shardInfo: &persistence.ShardInfo{ShardID: 0, RangeID: 1, TransferAckLevel: 0}, diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index 8d98b58bb73..503d024d87c 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -29,6 +29,8 @@ import ( "time" "github.com/pborman/uuid" + "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" + h "github.com/uber/cadence/.gen/go/history" r "github.com/uber/cadence/.gen/go/replicator" workflow "github.com/uber/cadence/.gen/go/shared" @@ -48,7 +50,6 @@ import ( "github.com/uber/cadence/common/service/config" warchiver "github.com/uber/cadence/service/worker/archiver" "github.com/uber/cadence/service/worker/replicator" - "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" ) const ( @@ -86,6 +87,7 @@ type ( resetor workflowResetor replicationTaskProcessors []*ReplicationTaskProcessor publicClient workflowserviceclient.Interface + eventsReapplier nDCEventsReapplier } ) @@ -209,7 +211,8 @@ func NewEngineWithShardContext( logger, ) } - historyEngImpl.resetor = newWorkflowResetor(historyEngImpl) + resetor := newWorkflowResetor(historyEngImpl) + historyEngImpl.resetor = resetor historyEngImpl.decisionHandler = newDecisionHandler(historyEngImpl) var replicationTaskProcessors []*ReplicationTaskProcessor @@ -220,7 +223,7 @@ func NewEngineWithShardContext( historyEngImpl.replicationTaskProcessors = replicationTaskProcessors shard.SetEngine(historyEngImpl) - + historyEngImpl.eventsReapplier = newNDCEventsReapplier(shard.GetMetricsClient(), logger) return historyEngImpl } @@ -2425,3 +2428,54 @@ func (e *historyEngineImpl) GetReplicationMessages(ctx ctx.Context, taskID int64 e.logger.Debug("Successfully fetched replication messages.", tag.Counter(len(replicationMessages.ReplicationTasks))) return replicationMessages, nil } + +func (e *historyEngineImpl) ReapplyEvents( + ctx ctx.Context, + domainUUID string, + workflowID string, + reapplyEvents []*workflow.HistoryEvent, +) error { + + domainEntry, err := e.getActiveDomainEntry(common.StringPtr(domainUUID)) + if err != nil { + return err + } + domainID := domainEntry.GetInfo().ID + // 
remove run id from the execution so that reapply events to the current run + execution := workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(workflowID), + } + + return e.updateWorkflowExecutionWithAction( + ctx, + domainID, + execution, + func(msBuilder mutableState, tBuilder *timerBuilder) (*updateWorkflowAction, error) { + createDecisionTask := true + // Do not create decision task when the workflow is cron and the cron has not been started yet + if msBuilder.GetExecutionInfo().CronSchedule != "" && !msBuilder.HasProcessedOrPendingDecision() { + createDecisionTask = false + } + // TODO when https://github.com/uber/cadence/issues/2420 is finished + // reset to workflow finish event + // ignore this case for now + if !msBuilder.IsWorkflowExecutionRunning() { + e.logger.Warn("failed to reapply event to a finished workflow", tag.WorkflowDomainID(domainID), tag.WorkflowID(workflowID)) + e.metricsClient.IncCounter(metrics.HistoryReapplyEventsScope, metrics.EventReapplySkippedCount) + return nil, nil + } + postActions := &updateWorkflowAction{ + createDecision: createDecisionTask, + } + if err := e.eventsReapplier.reapplyEvents( + ctx, + msBuilder, + reapplyEvents, + ); err != nil { + e.logger.Error("failed to re-apply stale events", tag.Error(err)) + return nil, &workflow.InternalServiceError{Message: "unable to re-apply stale events"} + } + + return postActions, nil + }) +} diff --git a/service/history/historyEngine2_test.go b/service/history/historyEngine2_test.go index 2131aad3b94..ba22be03cf2 100644 --- a/service/history/historyEngine2_test.go +++ b/service/history/historyEngine2_test.go @@ -121,8 +121,14 @@ func (s *engine2Suite) SetupTest() { metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil) - + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + metricsClient, + s.mockClientBean, + nil, + nil, + nil) s.mockDomainCache = &cache.DomainCacheMock{} s.mockDomainCache.On("GetDomainByID", mock.Anything).Return(cache.NewLocalDomainCacheEntryForTest( &p.DomainInfo{ID: testDomainID}, &p.DomainConfig{}, "", nil, diff --git a/service/history/historyEngine3_eventsv2_test.go b/service/history/historyEngine3_eventsv2_test.go index c742a91ca40..01fcba3109d 100644 --- a/service/history/historyEngine3_eventsv2_test.go +++ b/service/history/historyEngine3_eventsv2_test.go @@ -117,8 +117,14 @@ func (s *engine3Suite) SetupTest() { metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil) - + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + metricsClient, + s.mockClientBean, + nil, + nil, + nil) s.mockDomainCache = &cache.DomainCacheMock{} s.mockArchivalClient = &archiver.ClientMock{} s.mockEventsCache = &MockEventsCache{} diff --git a/service/history/historyEngineInterfaces.go b/service/history/historyEngineInterfaces.go index f7450db83fd..be0fec82f59 100644 --- a/service/history/historyEngineInterfaces.go +++ b/service/history/historyEngineInterfaces.go @@ -77,6 +77,7 @@ type ( SyncActivity(ctx 
context.Context, request *h.SyncActivityRequest) error GetReplicationMessages(ctx context.Context, taskID int64) (*replicator.ReplicationMessages, error) QueryWorkflow(ctx context.Context, request *h.QueryWorkflowRequest) (*h.QueryWorkflowResponse, error) + ReapplyEvents(ctx context.Context, domainUUID string, workflowID string, events []*workflow.HistoryEvent) error NotifyNewHistoryEvent(event *historyEventNotification) NotifyNewTransferTasks(tasks []persistence.Task) diff --git a/service/history/historyEngine_test.go b/service/history/historyEngine_test.go index f4ecec302b2..9458ac3603b 100644 --- a/service/history/historyEngine_test.go +++ b/service/history/historyEngine_test.go @@ -197,7 +197,14 @@ func (s *engineSuite) SetupTest() { s.mockMetricClient = metrics.NewClient(tally.NoopScope, metrics.History) s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.mockMetricClient, s.mockClientBean, nil, nil) + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + s.mockMetricClient, + s.mockClientBean, + nil, + nil, + nil) s.mockEventsCache = &MockEventsCache{} historyEventNotifier := newHistoryEventNotifier( diff --git a/service/history/historyReplicator_test.go b/service/history/historyReplicator_test.go index 7eba3adb5b4..eafeb9fbb05 100644 --- a/service/history/historyReplicator_test.go +++ b/service/history/historyReplicator_test.go @@ -106,7 +106,14 @@ func (s *historyReplicatorSuite) SetupTest() { s.mockMetadataMgr = &mocks.MetadataManager{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil) + s.mockService = service.NewTestService( + s.mockClusterMetadata, + s.mockMessagingClient, + metricsClient, + s.mockClientBean, + nil, + nil, + nil) s.mockShard = &shardContextImpl{ service: s.mockService, diff --git a/service/history/historyTestBase.go b/service/history/historyTestBase.go index 296253d3607..9bc08fc1c9b 100644 --- a/service/history/historyTestBase.go +++ b/service/history/historyTestBase.go @@ -98,7 +98,7 @@ func newTestShardContext(shardInfo *persistence.ShardInfo, transferSequenceNumbe clientBean client.Bean, config *Config, logger log.Logger) *TestShardContext { metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) domainCache := cache.NewDomainCache(metadataMgr, clusterMetadata, metricsClient, logger) - + serializer := persistence.NewPayloadSerializer() // initialize the cluster current time to be the same as ack level standbyClusterCurrentTime := make(map[string]time.Time) timerMaxReadLevelMap := make(map[string]time.Time) @@ -121,8 +121,15 @@ func newTestShardContext(shardInfo *persistence.ShardInfo, transferSequenceNumbe } shardCtx := &TestShardContext{ - shardID: 0, - service: service.NewTestService(clusterMetadata, nil, metricsClient, clientBean, nil, nil), + shardID: 0, + service: service.NewTestService( + clusterMetadata, + nil, + metricsClient, + clientBean, + nil, + nil, + serializer), shardInfo: shardInfo, transferSequenceNumber: transferSequenceNumber, historyMgr: historyMgr, diff --git a/service/history/nDCBranchMgr_test.go b/service/history/nDCBranchMgr_test.go index d0125579914..f3d71f844ae 100644 --- a/service/history/nDCBranchMgr_test.go +++ 
b/service/history/nDCBranchMgr_test.go @@ -75,8 +75,14 @@ func (s *nDCBranchMgrSuite) SetupTest() { s.mockHistoryV2Mgr = &mocks.HistoryV2Manager{} s.mockClusterMetadata = &mocks.ClusterMetadata{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, nil, nil, nil) - + s.mockService = service.NewTestService( + s.mockClusterMetadata, + nil, + metricsClient, + nil, + nil, + nil, + nil) s.mockShard = &shardContextImpl{ service: s.mockService, domainCache: s.mockDomainCache, diff --git a/service/history/nDCConflictResolver_test.go b/service/history/nDCConflictResolver_test.go index 9fb733fd524..9575bcf2e5c 100644 --- a/service/history/nDCConflictResolver_test.go +++ b/service/history/nDCConflictResolver_test.go @@ -68,8 +68,14 @@ func TestNDCConflictResolverSuite(t *testing.T) { func (s *nDCConflictResolverSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil) - + s.mockService = service.NewTestService( + nil, + nil, + metricsClient, + nil, + nil, + nil, + nil) s.mockShard = &shardContextImpl{ service: s.mockService, shardInfo: &persistence.ShardInfo{ShardID: 10, RangeID: 1, TransferAckLevel: 0}, diff --git a/service/history/nDCEventsReapplier.go b/service/history/nDCEventsReapplier.go index adc91d2603d..bcc93cafc28 100644 --- a/service/history/nDCEventsReapplier.go +++ b/service/history/nDCEventsReapplier.go @@ -32,7 +32,11 @@ import ( type ( nDCEventsReapplier interface { - reapplyEvents(ctx ctx.Context, msBuilder mutableState, historyEvents []*shared.HistoryEvent) error + reapplyEvents( + ctx ctx.Context, + msBuilder mutableState, + historyEvents []*shared.HistoryEvent, + ) error } nDCEventsReapplierImpl struct { @@ -57,6 +61,7 @@ func (r *nDCEventsReapplierImpl) reapplyEvents( msBuilder mutableState, historyEvents []*shared.HistoryEvent, ) error { + reapplyEvents := []*shared.HistoryEvent{} // TODO: need to implement Reapply policy for _, event := range historyEvents { diff --git a/service/history/nDCStateRebuilder_test.go b/service/history/nDCStateRebuilder_test.go index ee3cdd1c719..f7b7bc2eef9 100644 --- a/service/history/nDCStateRebuilder_test.go +++ b/service/history/nDCStateRebuilder_test.go @@ -79,7 +79,14 @@ func (s *nDCStateRebuilderSuite) SetupTest() { s.mockHistoryV2Mgr = &mocks.HistoryV2Manager{} s.mockClusterMetadata = &mocks.ClusterMetadata{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, nil, nil, nil) + s.mockService = service.NewTestService( + s.mockClusterMetadata, + nil, + metricsClient, + nil, + nil, + nil, + nil) s.mockDomainCache = &cache.DomainCacheMock{} s.mockEventsCache = &MockEventsCache{} diff --git a/service/history/nDCTransactionMgr.go b/service/history/nDCTransactionMgr.go index 610d7523aad..169dd6bf807 100644 --- a/service/history/nDCTransactionMgr.go +++ b/service/history/nDCTransactionMgr.go @@ -136,6 +136,7 @@ type ( historyCache *historyCache clusterMetadata cluster.Metadata historyV2Mgr persistence.HistoryV2Manager + serializer persistence.PayloadSerializer metricsClient metrics.Client logger log.Logger @@ -158,6 +159,7 @@ func newNDCTransactionMgr( historyCache: historyCache, clusterMetadata: shard.GetService().GetClusterMetadata(), historyV2Mgr: shard.GetHistoryV2Manager(), + 
serializer: shard.GetService().GetPayloadSerializer(), metricsClient: shard.GetMetricsClient(), logger: logger.WithTags(tag.ComponentHistoryReplicator), diff --git a/service/history/nDCTransactionMgrForExistingWorkflow_mock.go b/service/history/nDCTransactionMgrForExistingWorkflow_mock.go index c182ef7d942..6425647a069 100644 --- a/service/history/nDCTransactionMgrForExistingWorkflow_mock.go +++ b/service/history/nDCTransactionMgrForExistingWorkflow_mock.go @@ -28,11 +28,11 @@ package history import ( - context "context" - reflect "reflect" - time "time" + "context" + "reflect" + "time" - gomock "github.com/golang/mock/gomock" + "github.com/golang/mock/gomock" ) // MocknDCTransactionMgrForExistingWorkflow is a mock of nDCTransactionMgrForExistingWorkflow interface diff --git a/service/history/nDCTransactionMgrForExistingWorkflow_test.go b/service/history/nDCTransactionMgrForExistingWorkflow_test.go index 843f79487d0..a70964f886a 100644 --- a/service/history/nDCTransactionMgrForExistingWorkflow_test.go +++ b/service/history/nDCTransactionMgrForExistingWorkflow_test.go @@ -60,7 +60,7 @@ func TestNDCTransactionMgrForExistingWorkflowSuite(t *testing.T) { func (s *nDCTransactionMgrForExistingWorkflowSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil) + s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil, nil) s.mockShard = &shardContextImpl{ service: s.mockService, diff --git a/service/history/nDCTransactionMgrForNewWorkflow.go b/service/history/nDCTransactionMgrForNewWorkflow.go index fbe480d8a78..f8df4929b3d 100644 --- a/service/history/nDCTransactionMgrForNewWorkflow.go +++ b/service/history/nDCTransactionMgrForNewWorkflow.go @@ -230,6 +230,15 @@ func (r *nDCTransactionMgrForNewWorkflowImpl) createAsZombie( return err } + if err := targetWorkflow.getContext().reapplyEvents( + ctx, + targetWorkflowSnapshot.ExecutionInfo.DomainID, + targetWorkflowSnapshot.ExecutionInfo.WorkflowID, + targetWorkflowEventsSeq[0].Events, + ); err != nil { + return err + } + createMode := persistence.CreateWorkflowModeZombie prevRunID := "" prevLastWriteVersion := int64(0) diff --git a/service/history/nDCTransactionMgrForNewWorkflow_mock.go b/service/history/nDCTransactionMgrForNewWorkflow_mock.go index b9b8ff0d15e..52daf7bf169 100644 --- a/service/history/nDCTransactionMgrForNewWorkflow_mock.go +++ b/service/history/nDCTransactionMgrForNewWorkflow_mock.go @@ -28,11 +28,11 @@ package history import ( - context "context" - reflect "reflect" - time "time" + "context" + "reflect" + "time" - gomock "github.com/golang/mock/gomock" + "github.com/golang/mock/gomock" ) // MocknDCTransactionMgrForNewWorkflow is a mock of nDCTransactionMgrForNewWorkflow interface @@ -67,7 +67,7 @@ func (m *MocknDCTransactionMgrForNewWorkflow) dispatchForNewWorkflow(ctx context } // dispatchForNewWorkflow indicates an expected call of dispatchForNewWorkflow -func (mr *MocknDCTransactionMgrForNewWorkflowMockRecorder) dispatchForNewWorkflow(ctx, now, targetWorkflow interface{}) *gomock.Call { +func (mr *MocknDCTransactionMgrForNewWorkflowMockRecorder) dispatchForNewWorkflow(ctx context.Context, now time.Time, targetWorkflow nDCWorkflow) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "dispatchForNewWorkflow", reflect.TypeOf((*MocknDCTransactionMgrForNewWorkflow)(nil).dispatchForNewWorkflow), ctx, now, 
targetWorkflow) } diff --git a/service/history/nDCTransactionMgrForNewWorkflow_test.go b/service/history/nDCTransactionMgrForNewWorkflow_test.go index 45594989167..32b8cd33bf8 100644 --- a/service/history/nDCTransactionMgrForNewWorkflow_test.go +++ b/service/history/nDCTransactionMgrForNewWorkflow_test.go @@ -26,6 +26,7 @@ import ( "time" "github.com/golang/mock/gomock" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/uber-go/tally" @@ -60,7 +61,7 @@ func TestNDCTransactionMgrForNewWorkflowSuite(t *testing.T) { func (s *nDCTransactionMgrForNewWorkflowSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil) + s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil, nil) s.mockShard = &shardContextImpl{ service: s.mockService, @@ -265,7 +266,12 @@ func (s *nDCTransactionMgrForNewWorkflowSuite) TestDispatchForNewWorkflow_Create var currentReleaseFn releaseWorkflowExecutionFunc = func(error) { currentReleaseCalled = true } currentWorkflow.EXPECT().getReleaseFn().Return(currentReleaseFn).AnyTimes() - targetWorkflowSnapshot := &persistence.WorkflowSnapshot{} + targetWorkflowSnapshot := &persistence.WorkflowSnapshot{ + ExecutionInfo: &persistence.WorkflowExecutionInfo{ + DomainID: domainID, + WorkflowID: workflowID, + }, + } targetWorkflowEventsSeq := []*persistence.WorkflowEvents{&persistence.WorkflowEvents{}} targetWorkflowHistorySize := int64(12345) targetMutableState.On("GetExecutionInfo").Return(&persistence.WorkflowExecutionInfo{ @@ -295,6 +301,9 @@ func (s *nDCTransactionMgrForNewWorkflowSuite) TestDispatchForNewWorkflow_Create "", int64(0), ).Return(nil).Once() + targetContext.On("reapplyEvents", mock.Anything, domainID, workflowID, targetWorkflowEventsSeq[0].Events). + Return(nil). 
+ Times(1) err := s.createMgr.dispatchForNewWorkflow(ctx, now, targetWorkflow) s.NoError(err) diff --git a/service/history/nDCTransactionMgr_mock.go b/service/history/nDCTransactionMgr_mock.go index 499385f4e77..aedec3f6bf6 100644 --- a/service/history/nDCTransactionMgr_mock.go +++ b/service/history/nDCTransactionMgr_mock.go @@ -130,3 +130,11 @@ func (mr *MocknDCTransactionMgrMockRecorder) loadNDCWorkflow(ctx, domainID, work mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "loadNDCWorkflow", reflect.TypeOf((*MocknDCTransactionMgr)(nil).loadNDCWorkflow), ctx, domainID, workflowID, runID) } + +// reapplyEvents mocks base method +func (m *MocknDCTransactionMgr) reapplyEvents(ctx context.Context, reapplyEvents *persistence.WorkflowEvents) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "reapplyEvents", ctx, reapplyEvents) + ret0, _ := ret[0].(error) + return ret0 +} diff --git a/service/history/nDCTransactionMgr_test.go b/service/history/nDCTransactionMgr_test.go index 3a08d31e878..9ac4abc691f 100644 --- a/service/history/nDCTransactionMgr_test.go +++ b/service/history/nDCTransactionMgr_test.go @@ -30,6 +30,9 @@ import ( "github.com/uber-go/tally" "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/client" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/cache" "github.com/uber/cadence/common/clock" "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/loggerimpl" @@ -47,10 +50,13 @@ type ( mockCreateMgr *MocknDCTransactionMgrForNewWorkflow mockUpdateMgr *MocknDCTransactionMgrForExistingWorkflow - mockService service.Service - mockShard *shardContextImpl - mockExecutionMgr *mocks.ExecutionManager - logger log.Logger + mockService service.Service + mockShard *shardContextImpl + mockExecutionMgr *mocks.ExecutionManager + mockClientBean *client.MockClientBean + mockDomainCache *cache.DomainCacheMock + mockClusterMetadata *mocks.ClusterMetadata + logger log.Logger transactionMgr *nDCTransactionMgrImpl } @@ -65,8 +71,10 @@ func (s *nDCTransactionMgrSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) s.mockExecutionMgr = &mocks.ExecutionManager{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil) - + s.mockClientBean = &client.MockClientBean{} + s.mockService = service.NewTestService(nil, nil, metricsClient, s.mockClientBean, nil, nil, nil) + s.mockDomainCache = &cache.DomainCacheMock{} + s.mockClusterMetadata = &mocks.ClusterMetadata{} s.mockShard = &shardContextImpl{ service: s.mockService, shardInfo: &persistence.ShardInfo{ShardID: 10, RangeID: 1, TransferAckLevel: 0}, @@ -78,6 +86,8 @@ func (s *nDCTransactionMgrSuite) SetupTest() { logger: s.logger, metricsClient: metricsClient, timeSource: clock.NewRealTimeSource(), + domainCache: s.mockDomainCache, + clusterMetadata: s.mockClusterMetadata, } s.transactionMgr = newNDCTransactionMgr(s.mockShard, newHistoryCache(s.mockShard), s.logger) @@ -142,14 +152,12 @@ func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentGuaranteed() { workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes() mutableState.On("IsCurrentWorkflowGuaranteed").Return(true) - context.On( "persistNonFirstWorkflowEvents", workflowEvents, ).Return(int64(0), nil).Once() context.On( "updateWorkflowExecutionWithNew", now, persistence.UpdateWorkflowModeUpdateCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil), ).Return(nil).Once() - err := 
s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents) s.NoError(err) s.True(releaseCalled) @@ -173,7 +181,13 @@ func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CheckDB_NotCurrent() { defer mutableState.AssertExpectations(s.T()) var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true } - workflowEvents := &persistence.WorkflowEvents{} + workflowEvents := &persistence.WorkflowEvents{ + Events: []*shared.HistoryEvent{{ + EventType: common.EventTypePtr(shared.EventTypeWorkflowExecutionSignaled), + }}, + DomainID: domainID, + WorkflowID: workflowID, + } workflow.EXPECT().getContext().Return(context).AnyTimes() workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes() @@ -197,7 +211,6 @@ func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CheckDB_NotCurrent() { context.On( "updateWorkflowExecutionWithNew", now, persistence.UpdateWorkflowModeBypassCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil), ).Return(nil).Once() - err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents) s.NoError(err) s.True(releaseCalled) diff --git a/service/history/nDCWorkflowResetter_test.go b/service/history/nDCWorkflowResetter_test.go index 3cc0df86eb8..16e219f6697 100644 --- a/service/history/nDCWorkflowResetter_test.go +++ b/service/history/nDCWorkflowResetter_test.go @@ -79,7 +79,7 @@ func (s *nDCWorkflowResetterSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) s.mockHistoryV2Mgr = &mocks.HistoryV2Manager{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil) + s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil, nil) s.mockShard = &shardContextImpl{ service: s.mockService, diff --git a/service/history/nDCWorkflow_test.go b/service/history/nDCWorkflow_test.go index c42c5e882b0..34fdc5d6bad 100644 --- a/service/history/nDCWorkflow_test.go +++ b/service/history/nDCWorkflow_test.go @@ -70,7 +70,7 @@ func (s *nDCWorkflowSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) s.mockClusterMetadata = &mocks.ClusterMetadata{} metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) - s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, nil, nil, nil) + s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, nil, nil, nil, nil) s.mockDomainCache = &cache.DomainCacheMock{} s.mockShard = &shardContextImpl{ diff --git a/service/history/queueAckMgr_test.go b/service/history/queueAckMgr_test.go index f362df65d46..b4371ac0290 100644 --- a/service/history/queueAckMgr_test.go +++ b/service/history/queueAckMgr_test.go @@ -112,7 +112,7 @@ func (s *queueAckMgrSuite) SetupTest() { s.mockProcessor = &MockProcessor{} s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) s.mockClientBean = &client.MockClientBean{} - s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil) + s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil, nil) s.mockShard = &shardContextImpl{ service: s.mockService, clusterMetadata: s.mockClusterMetadata, @@ -321,7 +321,7 @@ func (s *queueFailoverAckMgrSuite) SetupTest() { s.mockProcessor = &MockProcessor{} s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil) s.mockClientBean = &client.MockClientBean{} - 
s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockShard = &shardContextImpl{
 		service:         s.mockService,
 		clusterMetadata: s.mockClusterMetadata,
diff --git a/service/history/replicatorQueueProcessor_test.go b/service/history/replicatorQueueProcessor_test.go
index e60e8800960..0374377256c 100644
--- a/service/history/replicatorQueueProcessor_test.go
+++ b/service/history/replicatorQueueProcessor_test.go
@@ -91,7 +91,7 @@ func (s *replicatorQueueProcessorSuite) SetupTest() {
 	s.mockClusterMetadata = &mocks.ClusterMetadata{}
 	s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockShard = &shardContextImpl{
 		service:   s.mockService,
 		shardInfo: &persistence.ShardInfo{ShardID: 0, RangeID: 1, TransferAckLevel: 0},
diff --git a/service/history/shardController_test.go b/service/history/shardController_test.go
index fb3c6f5ccd5..e29fc2683cf 100644
--- a/service/history/shardController_test.go
+++ b/service/history/shardController_test.go
@@ -89,7 +89,7 @@ func (s *shardControllerSuite) SetupTest() {
 	s.mockClusterMetadata = &mmocks.ClusterMetadata{}
 	s.mockMessagingClient = mmocks.NewMockMessagingClient(s.mockMessaging, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil, nil)
 	s.domainCache = cache.NewDomainCache(s.mockMetadaraMgr, s.mockClusterMetadata, s.metricsClient, s.logger)
 	s.controller = newShardController(s.mockService, s.hostInfo, s.mockServiceResolver, s.mockShardManager, s.mockHistoryMgr, s.mockHistoryV2Mgr, s.domainCache, s.mockExecutionMgrFactory, s.mockEngineFactory, s.config, s.logger, s.metricsClient)
 
diff --git a/service/history/stateBuilder_test.go b/service/history/stateBuilder_test.go
index c505419f242..0fff2e32b01 100644
--- a/service/history/stateBuilder_test.go
+++ b/service/history/stateBuilder_test.go
@@ -91,7 +91,7 @@ func (s *stateBuilderSuite) SetupTest() {
 	s.mockDomainCache = &cache.DomainCacheMock{}
 	metricsClient := metrics.NewClient(tally.NoopScope, metrics.History)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockEventsCache = &MockEventsCache{}
 
 	s.mockShard = &shardContextImpl{
diff --git a/service/history/taskProcessor_test.go b/service/history/taskProcessor_test.go
index a60dedc026c..2e51d6ebfad 100644
--- a/service/history/taskProcessor_test.go
+++ b/service/history/taskProcessor_test.go
@@ -78,7 +78,7 @@ func (s *taskProcessorSuite) SetupTest() {
 	s.mockProcessor = &MockTimerProcessor{}
 	s.mockQueueAckMgr = &MockTimerQueueAckMgr{}
 	s.mockMetadataMgr = &mocks.MetadataManager{}
-	s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil)
+	s.mockService = service.NewTestService(nil, nil, metricsClient, nil, nil, nil, nil)
 	s.mockShard = &shardContextImpl{
 		service:   s.mockService,
 		shardInfo: &persistence.ShardInfo{ShardID: shardID, RangeID: 1, TransferAckLevel: 0},
diff --git a/service/history/timerQueueAckMgr_test.go b/service/history/timerQueueAckMgr_test.go
index 9e197f1430d..d9bdd58312a 100644
--- a/service/history/timerQueueAckMgr_test.go
+++ b/service/history/timerQueueAckMgr_test.go
@@ -113,7 +113,7 @@ func (s *timerQueueAckMgrSuite) SetupTest() {
 	s.mockProducer = &mocks.KafkaProducer{}
 	s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockShard = &shardContextImpl{
 		service:         s.mockService,
 		clusterMetadata: s.mockClusterMetadata,
@@ -562,7 +562,7 @@ func (s *timerQueueFailoverAckMgrSuite) SetupTest() {
 	s.mockProducer = &mocks.KafkaProducer{}
 	s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, s.metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockShard = &shardContextImpl{
 		service:         s.mockService,
 		clusterMetadata: s.mockClusterMetadata,
diff --git a/service/history/timerQueueProcessor2_test.go b/service/history/timerQueueProcessor2_test.go
index c5c70a1b828..44a77085158 100644
--- a/service/history/timerQueueProcessor2_test.go
+++ b/service/history/timerQueueProcessor2_test.go
@@ -127,7 +127,7 @@ func (s *timerQueueProcessor2Suite) SetupTest() {
 	metricsClient := metrics.NewClient(tally.NoopScope, metrics.History)
 	s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockEventsCache = &MockEventsCache{}
 
 	domainCache := cache.NewDomainCache(s.mockMetadataMgr, s.mockClusterMetadata, metricsClient, s.logger)
diff --git a/service/history/timerQueueProcessorBase_test.go b/service/history/timerQueueProcessorBase_test.go
index 9c7bf71dfae..37acb306170 100644
--- a/service/history/timerQueueProcessorBase_test.go
+++ b/service/history/timerQueueProcessorBase_test.go
@@ -93,7 +93,7 @@ func (s *timerQueueProcessorBaseSuite) SetupTest() {
 	s.mockMetadataMgr = &mocks.MetadataManager{}
 	s.mockClusterMetadata = &mocks.ClusterMetadata{}
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, nil, metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockExecutionManager = &mocks.ExecutionManager{}
 	s.mockVisibilityManager = &mocks.VisibilityManager{}
 	s.mockHistoryV2Manager = &mocks.HistoryV2Manager{}
diff --git a/service/history/timerQueueStandbyProcessor_test.go b/service/history/timerQueueStandbyProcessor_test.go
index 5d5e11e297f..b33ff27f12d 100644
--- a/service/history/timerQueueStandbyProcessor_test.go
+++ b/service/history/timerQueueStandbyProcessor_test.go
@@ -115,7 +115,7 @@ func (s *timerQueueStandbyProcessorSuite) SetupTest() {
 	)
 	metricsClient := metrics.NewClient(tally.NoopScope, metrics.History)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil, nil)
 
 	config := NewDynamicConfigForTest()
 	shardContext := &shardContextImpl{
diff --git a/service/history/transferQueueActiveProcessor_test.go b/service/history/transferQueueActiveProcessor_test.go
index 23f9b452eac..d909fa72d03 100644
--- a/service/history/transferQueueActiveProcessor_test.go
+++ b/service/history/transferQueueActiveProcessor_test.go
@@ -146,6 +146,7 @@ func (s *transferQueueActiveProcessorSuite) SetupTest() {
 		s.mockClientBean,
 		s.mockArchivalMetadata,
 		s.mockArchiverProvider,
+		nil,
 	)
 
 	shardContext := &shardContextImpl{
diff --git a/service/history/transferQueueStandbyProcessor_test.go b/service/history/transferQueueStandbyProcessor_test.go
index 05313350060..fc942ebf16e 100644
--- a/service/history/transferQueueStandbyProcessor_test.go
+++ b/service/history/transferQueueStandbyProcessor_test.go
@@ -139,6 +139,7 @@ func (s *transferQueueStandbyProcessorSuite) SetupTest() {
 		s.mockClientBean,
 		s.mockArchivalMetadata,
 		s.mockArchiverProvider,
+		nil,
 	)
 
 	config := NewDynamicConfigForTest()
diff --git a/service/history/workflowExecutionContext.go b/service/history/workflowExecutionContext.go
index e2c94850a5b..291ccd4e0ca 100644
--- a/service/history/workflowExecutionContext.go
+++ b/service/history/workflowExecutionContext.go
@@ -36,6 +36,10 @@ import (
 	"github.com/uber/cadence/common/persistence"
 )
 
+const (
+	defaultRemoteCallTimeout = 30 * time.Second
+)
+
 type (
 	workflowExecutionContext interface {
 		getDomainName() string
@@ -120,6 +124,13 @@ type (
 		) (retError error)
 
 		getQueryRegistry() QueryRegistry
+
+		reapplyEvents(
+			ctx context.Context,
+			domainID string,
+			workflowID string,
+			events []*workflow.HistoryEvent,
+		) error
 	}
 )
 
@@ -351,12 +362,14 @@ func (c *workflowExecutionContextImpl) conflictResolveWorkflowExecution(
 		return err
 	}
 	resetHistorySize := c.getHistorySize()
+	reapplyEvent := []*workflow.HistoryEvent{}
 	for _, workflowEvents := range workflowEventsSeq {
 		eventsSize, err := c.persistNonFirstWorkflowEvents(workflowEvents)
 		if err != nil {
 			return err
 		}
 		resetHistorySize += eventsSize
+		reapplyEvent = append(reapplyEvent, workflowEvents.Events...)
 	}
 	c.setHistorySize(resetHistorySize)
 	resetWorkflow.ExecutionStats = &persistence.ExecutionStats{
@@ -380,7 +393,8 @@ func (c *workflowExecutionContextImpl) conflictResolveWorkflowExecution(
 			return err
 		}
 		newWorkflowSizeSize := newContext.getHistorySize()
-		eventsSize, err := c.persistFirstWorkflowEvents(workflowEventsSeq[0])
+		startEvent := workflowEventsSeq[0]
+		eventsSize, err := c.persistFirstWorkflowEvents(startEvent)
 		if err != nil {
 			return err
 		}
@@ -389,6 +403,7 @@ func (c *workflowExecutionContextImpl) conflictResolveWorkflowExecution(
 		newWorkflow.ExecutionStats = &persistence.ExecutionStats{
 			HistorySize: newWorkflowSizeSize,
 		}
+		reapplyEvent = append(reapplyEvent, startEvent.Events...)
 	}
 
 	var currentWorkflow *persistence.WorkflowMutation
@@ -421,6 +436,15 @@ func (c *workflowExecutionContextImpl) conflictResolveWorkflowExecution(
 		}
 	}
 
+	if err := c.conflictResolveEventReapply(
+		conflictResolveMode,
+		resetWorkflow.ExecutionInfo.DomainID,
+		resetWorkflow.ExecutionInfo.WorkflowID,
+		reapplyEvent,
+	); err != nil {
+		return err
+	}
+
 	if err := c.shard.ConflictResolveWorkflowExecution(&persistence.ConflictResolveWorkflowExecutionRequest{
 		// RangeID , this is set by shard context
 		Mode: conflictResolveMode,
@@ -561,6 +585,8 @@ func (c *workflowExecutionContextImpl) updateWorkflowExecutionWithNew(
 	if err != nil {
 		return err
 	}
+
+	reapplyEvent := []*workflow.HistoryEvent{}
 	currentWorkflowSize := c.getHistorySize()
 	for _, workflowEvents := range workflowEventsSeq {
 		eventsSize, err := c.persistNonFirstWorkflowEvents(workflowEvents)
@@ -568,6 +594,7 @@ func (c *workflowExecutionContextImpl) updateWorkflowExecutionWithNew(
 			return err
 		}
 		currentWorkflowSize += eventsSize
+		reapplyEvent = append(reapplyEvent, workflowEvents.Events...)
 	}
 	c.setHistorySize(currentWorkflowSize)
 	currentWorkflow.ExecutionStats = &persistence.ExecutionStats{
@@ -591,7 +618,8 @@ func (c *workflowExecutionContextImpl) updateWorkflowExecutionWithNew(
 			return err
 		}
 		newWorkflowSizeSize := newContext.getHistorySize()
-		eventsSize, err := c.persistFirstWorkflowEvents(workflowEventsSeq[0])
+		startEvent := workflowEventsSeq[0]
+		eventsSize, err := c.persistFirstWorkflowEvents(startEvent)
 		if err != nil {
 			return err
 		}
@@ -600,6 +628,7 @@ func (c *workflowExecutionContextImpl) updateWorkflowExecutionWithNew(
 		newWorkflow.ExecutionStats = &persistence.ExecutionStats{
 			HistorySize: newWorkflowSizeSize,
 		}
+		reapplyEvent = append(reapplyEvent, startEvent.Events...)
 	}
 
 	if err := c.mergeContinueAsNewReplicationTasks(
@@ -609,6 +638,15 @@ func (c *workflowExecutionContextImpl) updateWorkflowExecutionWithNew(
 		return err
 	}
 
+	if err := c.updateWorkflowExecutionEventReapply(
+		updateMode,
+		currentWorkflow.ExecutionInfo.DomainID,
+		currentWorkflow.ExecutionInfo.WorkflowID,
+		reapplyEvent,
+	); err != nil {
+		return err
+	}
+
 	resp, err := c.updateWorkflowExecutionWithRetry(&persistence.UpdateWorkflowExecutionRequest{
 		// RangeID , this is set by shard context
 		Mode: updateMode,
@@ -1142,3 +1180,98 @@ func (c *workflowExecutionContextImpl) resetWorkflowExecution(
 func (c *workflowExecutionContextImpl) getQueryRegistry() QueryRegistry {
 	return c.queryRegistry
 }
+
+func (c *workflowExecutionContextImpl) updateWorkflowExecutionEventReapply(
+	updateMode persistence.UpdateWorkflowMode,
+	domainID string,
+	workflowID string,
+	events []*workflow.HistoryEvent,
+) error {
+	if updateMode == persistence.UpdateWorkflowModeBypassCurrent {
+		ctx, cancel := context.WithTimeout(context.Background(), defaultRemoteCallTimeout)
+		err := c.reapplyEvents(ctx, domainID, workflowID, events)
+		defer cancel()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *workflowExecutionContextImpl) conflictResolveEventReapply(
+	conflictResolveMode persistence.ConflictResolveWorkflowMode,
+	domainID string,
+	workflowID string,
+	events []*workflow.HistoryEvent,
+) error {
+	if conflictResolveMode == persistence.ConflictResolveWorkflowModeBypassCurrent {
+		ctx, cancel := context.WithTimeout(context.Background(), defaultRemoteCallTimeout)
+		err := c.reapplyEvents(ctx, domainID, workflowID, events)
+		defer cancel()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *workflowExecutionContextImpl) reapplyEvents(
+	ctx context.Context,
+	domainID string,
+	workflowID string,
+	events []*workflow.HistoryEvent,
+) error {
+
+	reapplyEvents := []*workflow.HistoryEvent{}
+	// TODO: need to implement Reapply policy
+	for _, event := range events {
+		switch event.GetEventType() {
+		case workflow.EventTypeWorkflowExecutionSignaled:
+			reapplyEvents = append(reapplyEvents, event)
+		}
+	}
+	// there are no events to reapply
+	if len(reapplyEvents) == 0 {
+		return nil
+	}
+	// Events are only reapplied to the current run of the workflow.
+	// Leaving the run ID empty targets the current run.
+	execution := &workflow.WorkflowExecution{
+		WorkflowId: common.StringPtr(workflowID),
+	}
+	domainCache := c.shard.GetDomainCache()
+	clientBean := c.shard.GetService().GetClientBean()
+	serializer := c.shard.GetService().GetPayloadSerializer()
+	domainEntry, err := domainCache.GetDomainByID(domainID)
+	if err != nil {
+		return err
+	}
+
+	// TODO: handle events reapplied to this same workflow, which is a problem because the workflow context lock is already held
+	activeCluster := domainEntry.GetReplicationConfig().ActiveClusterName
+	if activeCluster == c.shard.GetClusterMetadata().GetCurrentClusterName() {
+		return c.shard.GetEngine().ReapplyEvents(
+			ctx,
+			domainID,
+			workflowID,
+			reapplyEvents,
+		)
+	}
+
+	// Serialize the filtered events so the reapply request can be
+	// shipped to the active cluster of the domain.
+	reapplyEventsDataBlob, err := serializer.SerializeBatchEvents(reapplyEvents, common.EncodingTypeThriftRW)
+	if err != nil {
+		return err
+	}
+	// The active cluster of the domain differs from the current cluster.
+	// Use the remote frontend client to route this request to the active cluster;
+	// reapplication only happens in the active cluster.
+	return clientBean.GetRemoteFrontendClient(activeCluster).ReapplyEvents(
+		ctx,
+		&workflow.ReapplyEventsRequest{
+			DomainName:        common.StringPtr(domainEntry.GetInfo().Name),
+			WorkflowExecution: execution,
+			Events:            reapplyEventsDataBlob.ToThrift(),
+		})
+}
diff --git a/service/history/workflowResetor_test.go b/service/history/workflowResetor_test.go
index 4910287e3d4..46fe14c8b28 100644
--- a/service/history/workflowResetor_test.go
+++ b/service/history/workflowResetor_test.go
@@ -125,7 +125,7 @@ func (s *resetorSuite) SetupTest() {
 	metricsClient := metrics.NewClient(tally.NoopScope, metrics.History)
 	s.mockMessagingClient = mocks.NewMockMessagingClient(s.mockProducer, nil)
 	s.mockClientBean = &client.MockClientBean{}
-	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil)
+	s.mockService = service.NewTestService(s.mockClusterMetadata, s.mockMessagingClient, metricsClient, s.mockClientBean, nil, nil, nil)
 	s.mockDomainCache = &cache.DomainCacheMock{}
 	s.mockArchivalClient = &archiver.ClientMock{}
 	s.mockEventsCache = &MockEventsCache{}