diff --git a/.gen/go/history/history.go b/.gen/go/history/history.go index 25f48a7acba..b4c267db25b 100644 --- a/.gen/go/history/history.go +++ b/.gen/go/history/history.go @@ -11031,6 +11031,7 @@ type SyncActivityRequest struct { Attempt *int32 `json:"attempt,omitempty"` LastFailureReason *string `json:"lastFailureReason,omitempty"` LastWorkerIdentity *string `json:"lastWorkerIdentity,omitempty"` + LastFailureDetails []byte `json:"lastFailureDetails,omitempty"` } // ToWire translates a SyncActivityRequest struct into a Thrift-level intermediate @@ -11050,7 +11051,7 @@ type SyncActivityRequest struct { // } func (v *SyncActivityRequest) ToWire() (wire.Value, error) { var ( - fields [13]wire.Field + fields [14]wire.Field i int = 0 w wire.Value err error @@ -11160,6 +11161,14 @@ func (v *SyncActivityRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 130, Value: w} i++ } + if v.LastFailureDetails != nil { + w, err = wire.NewValueBinary(v.LastFailureDetails), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 140, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -11313,6 +11322,14 @@ func (v *SyncActivityRequest) FromWire(w wire.Value) error { return err } + } + case 140: + if field.Value.Type() == wire.TBinary { + v.LastFailureDetails, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + } } } @@ -11327,7 +11344,7 @@ func (v *SyncActivityRequest) String() string { return "" } - var fields [13]string + var fields [14]string i := 0 if v.DomainId != nil { fields[i] = fmt.Sprintf("DomainId: %v", *(v.DomainId)) @@ -11381,6 +11398,10 @@ func (v *SyncActivityRequest) String() string { fields[i] = fmt.Sprintf("LastWorkerIdentity: %v", *(v.LastWorkerIdentity)) i++ } + if v.LastFailureDetails != nil { + fields[i] = fmt.Sprintf("LastFailureDetails: %v", v.LastFailureDetails) + i++ + } return fmt.Sprintf("SyncActivityRequest{%v}", strings.Join(fields[:i], ", ")) } @@ 
-11434,6 +11455,9 @@ func (v *SyncActivityRequest) Equals(rhs *SyncActivityRequest) bool { if !_String_EqualsPtr(v.LastWorkerIdentity, rhs.LastWorkerIdentity) { return false } + if !((v.LastFailureDetails == nil && rhs.LastFailureDetails == nil) || (v.LastFailureDetails != nil && rhs.LastFailureDetails != nil && bytes.Equal(v.LastFailureDetails, rhs.LastFailureDetails))) { + return false + } return true } @@ -11483,6 +11507,9 @@ func (v *SyncActivityRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err e if v.LastWorkerIdentity != nil { enc.AddString("lastWorkerIdentity", *v.LastWorkerIdentity) } + if v.LastFailureDetails != nil { + enc.AddString("lastFailureDetails", base64.StdEncoding.EncodeToString(v.LastFailureDetails)) + } return err } @@ -11681,6 +11708,21 @@ func (v *SyncActivityRequest) IsSetLastWorkerIdentity() bool { return v != nil && v.LastWorkerIdentity != nil } +// GetLastFailureDetails returns the value of LastFailureDetails if it is set or its +// zero value if it is unset. +func (v *SyncActivityRequest) GetLastFailureDetails() (o []byte) { + if v != nil && v.LastFailureDetails != nil { + return v.LastFailureDetails + } + + return +} + +// IsSetLastFailureDetails returns true if LastFailureDetails is not nil. 
+func (v *SyncActivityRequest) IsSetLastFailureDetails() bool { + return v != nil && v.LastFailureDetails != nil +} + type SyncShardStatusRequest struct { SourceCluster *string `json:"sourceCluster,omitempty"` ShardId *int64 `json:"shardId,omitempty"` @@ -12098,7 +12140,7 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "history", Package: "github.com/uber/cadence/.gen/go/history", FilePath: "history.thrift", - SHA1: "a0d43b027b894682c5aec52b3c12a1e4f747b27b", + SHA1: "ce5eb8d365d70e410d0c32a8a6bf86d2b924b122", Includes: []*thriftreflect.ThriftModule{ replicator.ThriftModule, shared.ThriftModule, @@ -12106,7 +12148,7 @@ var ThriftModule = &thriftreflect.ThriftModule{ Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") 
PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: 
workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. 
Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional 
shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n 70: optional bool resetWorkflow\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = 
\"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional shared.WorkflowQuery query\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. 
It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: 
shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. 
This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse 
SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current 
execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: 
ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 
)\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: shared.ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n )\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = 
\"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n 
// NOTE: workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. 
Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional 
shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n 70: optional bool resetWorkflow\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = 
\"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional shared.WorkflowQuery query\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. 
It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: 
shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. 
This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse 
SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current 
execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: 
ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 
)\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: shared.ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n )\n}\n" // HistoryService_CloseShard_Args represents the arguments for the HistoryService.CloseShard function. 
// diff --git a/.gen/go/replicator/replicator.go b/.gen/go/replicator/replicator.go index 891ce631159..c46fc14187a 100644 --- a/.gen/go/replicator/replicator.go +++ b/.gen/go/replicator/replicator.go @@ -4112,6 +4112,7 @@ type SyncActicvityTaskAttributes struct { Attempt *int32 `json:"attempt,omitempty"` LastFailureReason *string `json:"lastFailureReason,omitempty"` LastWorkerIdentity *string `json:"lastWorkerIdentity,omitempty"` + LastFailureDetails []byte `json:"lastFailureDetails,omitempty"` } // ToWire translates a SyncActicvityTaskAttributes struct into a Thrift-level intermediate @@ -4131,7 +4132,7 @@ type SyncActicvityTaskAttributes struct { // } func (v *SyncActicvityTaskAttributes) ToWire() (wire.Value, error) { var ( - fields [13]wire.Field + fields [14]wire.Field i int = 0 w wire.Value err error @@ -4241,6 +4242,14 @@ func (v *SyncActicvityTaskAttributes) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 130, Value: w} i++ } + if v.LastFailureDetails != nil { + w, err = wire.NewValueBinary(v.LastFailureDetails), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 140, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -4394,6 +4403,14 @@ func (v *SyncActicvityTaskAttributes) FromWire(w wire.Value) error { return err } + } + case 140: + if field.Value.Type() == wire.TBinary { + v.LastFailureDetails, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + } } } @@ -4408,7 +4425,7 @@ func (v *SyncActicvityTaskAttributes) String() string { return "" } - var fields [13]string + var fields [14]string i := 0 if v.DomainId != nil { fields[i] = fmt.Sprintf("DomainId: %v", *(v.DomainId)) @@ -4462,6 +4479,10 @@ func (v *SyncActicvityTaskAttributes) String() string { fields[i] = fmt.Sprintf("LastWorkerIdentity: %v", *(v.LastWorkerIdentity)) i++ } + if v.LastFailureDetails != nil { + fields[i] = fmt.Sprintf("LastFailureDetails: %v", v.LastFailureDetails) + i++ + } return 
fmt.Sprintf("SyncActicvityTaskAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -4515,6 +4536,9 @@ func (v *SyncActicvityTaskAttributes) Equals(rhs *SyncActicvityTaskAttributes) b if !_String_EqualsPtr(v.LastWorkerIdentity, rhs.LastWorkerIdentity) { return false } + if !((v.LastFailureDetails == nil && rhs.LastFailureDetails == nil) || (v.LastFailureDetails != nil && rhs.LastFailureDetails != nil && bytes.Equal(v.LastFailureDetails, rhs.LastFailureDetails))) { + return false + } return true } @@ -4564,6 +4588,9 @@ func (v *SyncActicvityTaskAttributes) MarshalLogObject(enc zapcore.ObjectEncoder if v.LastWorkerIdentity != nil { enc.AddString("lastWorkerIdentity", *v.LastWorkerIdentity) } + if v.LastFailureDetails != nil { + enc.AddString("lastFailureDetails", base64.StdEncoding.EncodeToString(v.LastFailureDetails)) + } return err } @@ -4762,6 +4789,21 @@ func (v *SyncActicvityTaskAttributes) IsSetLastWorkerIdentity() bool { return v != nil && v.LastWorkerIdentity != nil } +// GetLastFailureDetails returns the value of LastFailureDetails if it is set or its +// zero value if it is unset. +func (v *SyncActicvityTaskAttributes) GetLastFailureDetails() (o []byte) { + if v != nil && v.LastFailureDetails != nil { + return v.LastFailureDetails + } + + return +} + +// IsSetLastFailureDetails returns true if LastFailureDetails is not nil. 
+func (v *SyncActicvityTaskAttributes) IsSetLastFailureDetails() bool { + return v != nil && v.LastFailureDetails != nil +} + type SyncShardStatusTaskAttributes struct { SourceCluster *string `json:"sourceCluster,omitempty"` ShardId *int64 `json:"shardId,omitempty"` @@ -4993,11 +5035,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "replicator", Package: "github.com/uber/cadence/.gen/go/replicator", FilePath: "replicator.thrift", - SHA1: "9168e83c5974c23a1ef2d13146fb85b33852875a", + SHA1: "17c381407d05d599e0a5fcce1f756632f63358cd", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n HistoryV2\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n 130: optional bool newRunNDC\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActicvityTaskAttributes {\n 10: optional string domainId\n 
20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\nstruct HistoryTaskV2Attributes {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional list versionHistoryItems\n 50: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 70: optional shared.DataBlob newRunEvents\n 80: optional bool resetWorkflow\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 11: optional i64 (js.type = \"Long\") sourceTaskId\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes // TODO deprecate once NDC migration is done\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActicvityTaskAttributes syncActicvityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes // TODO deprecate once kafka deprecation is done\n 70: optional HistoryTaskV2Attributes historyTaskV2Attributes\n}\n\nstruct ReplicationToken {\n 10: optional i32 shardID\n // lastRetrivedMessageId is where the next fetch should begin with\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrivedMessageId if passive side supports prefetching messages.\n 30: optional i64 (js.type = \"Long\") lastProcessedMessageId\n}\n\nstruct ReplicationMessages {\n 10: optional 
list replicationTasks\n // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows).\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n 30: optional bool hasMore // Hint for flow control\n}\n\nstruct GetReplicationMessagesRequest {\n 10: optional list tokens\n}\n\nstruct GetReplicationMessagesResponse {\n 10: optional map messagesByShard\n}" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n HistoryV2\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n 130: optional bool newRunNDC\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActicvityTaskAttributes {\n 10: optional string domainId\n 
20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n}\n\nstruct HistoryTaskV2Attributes {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional list versionHistoryItems\n 50: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 70: optional shared.DataBlob newRunEvents\n 80: optional bool resetWorkflow\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 11: optional i64 (js.type = \"Long\") sourceTaskId\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes // TODO deprecate once NDC migration is done\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActicvityTaskAttributes syncActicvityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes // TODO deprecate once kafka deprecation is done\n 70: optional HistoryTaskV2Attributes historyTaskV2Attributes\n}\n\nstruct ReplicationToken {\n 10: optional i32 shardID\n // lastRetrivedMessageId is where the next fetch should begin with\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrivedMessageId if passive side supports prefetching messages.\n 30: optional i64 (js.type = \"Long\") 
lastProcessedMessageId\n}\n\nstruct ReplicationMessages {\n 10: optional list replicationTasks\n // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows).\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n 30: optional bool hasMore // Hint for flow control\n}\n\nstruct GetReplicationMessagesRequest {\n 10: optional list tokens\n}\n\nstruct GetReplicationMessagesResponse {\n 10: optional map messagesByShard\n}" diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index 9e5aa27dea2..f4b16c23975 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -25707,6 +25707,7 @@ type PendingActivityInfo struct { ExpirationTimestamp *int64 `json:"expirationTimestamp,omitempty"` LastFailureReason *string `json:"lastFailureReason,omitempty"` LastWorkerIdentity *string `json:"lastWorkerIdentity,omitempty"` + LastFailureDetails []byte `json:"lastFailureDetails,omitempty"` } // ToWire translates a PendingActivityInfo struct into a Thrift-level intermediate @@ -25726,7 +25727,7 @@ type PendingActivityInfo struct { // } func (v *PendingActivityInfo) ToWire() (wire.Value, error) { var ( - fields [12]wire.Field + fields [13]wire.Field i int = 0 w wire.Value err error @@ -25828,6 +25829,14 @@ func (v *PendingActivityInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 120, Value: w} i++ } + if v.LastFailureDetails != nil { + w, err = wire.NewValueBinary(v.LastFailureDetails), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 130, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -25975,6 +25984,14 @@ func (v *PendingActivityInfo) FromWire(w wire.Value) error { return err } + } + case 130: + if field.Value.Type() == wire.TBinary { + v.LastFailureDetails, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + } } } @@ -25989,7 +26006,7 @@ func (v *PendingActivityInfo) 
String() string { return "" } - var fields [12]string + var fields [13]string i := 0 if v.ActivityID != nil { fields[i] = fmt.Sprintf("ActivityID: %v", *(v.ActivityID)) @@ -26039,6 +26056,10 @@ func (v *PendingActivityInfo) String() string { fields[i] = fmt.Sprintf("LastWorkerIdentity: %v", *(v.LastWorkerIdentity)) i++ } + if v.LastFailureDetails != nil { + fields[i] = fmt.Sprintf("LastFailureDetails: %v", v.LastFailureDetails) + i++ + } return fmt.Sprintf("PendingActivityInfo{%v}", strings.Join(fields[:i], ", ")) } @@ -26099,6 +26120,9 @@ func (v *PendingActivityInfo) Equals(rhs *PendingActivityInfo) bool { if !_String_EqualsPtr(v.LastWorkerIdentity, rhs.LastWorkerIdentity) { return false } + if !((v.LastFailureDetails == nil && rhs.LastFailureDetails == nil) || (v.LastFailureDetails != nil && rhs.LastFailureDetails != nil && bytes.Equal(v.LastFailureDetails, rhs.LastFailureDetails))) { + return false + } return true } @@ -26145,6 +26169,9 @@ func (v *PendingActivityInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err e if v.LastWorkerIdentity != nil { enc.AddString("lastWorkerIdentity", *v.LastWorkerIdentity) } + if v.LastFailureDetails != nil { + enc.AddString("lastFailureDetails", base64.StdEncoding.EncodeToString(v.LastFailureDetails)) + } return err } @@ -26328,6 +26355,21 @@ func (v *PendingActivityInfo) IsSetLastWorkerIdentity() bool { return v != nil && v.LastWorkerIdentity != nil } +// GetLastFailureDetails returns the value of LastFailureDetails if it is set or its +// zero value if it is unset. +func (v *PendingActivityInfo) GetLastFailureDetails() (o []byte) { + if v != nil && v.LastFailureDetails != nil { + return v.LastFailureDetails + } + + return +} + +// IsSetLastFailureDetails returns true if LastFailureDetails is not nil. 
+func (v *PendingActivityInfo) IsSetLastFailureDetails() bool { + return v != nil && v.LastFailureDetails != nil +} + type PendingActivityState int32 const ( @@ -56776,8 +56818,8 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "shared", Package: "github.com/uber/cadence/.gen/go/shared", FilePath: "shared.thrift", - SHA1: "0cce359837fff84563e5a42476dfdd76c1be25bd", + SHA1: "276fecaa9f974201a42c67617ed17d315e74c5cb", Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception EntityNotExistsError {\n 1: required string message\n}\n\nexception ServiceBusyError {\n 1: required string message\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskError {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") nextEventId\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, 
failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n 
ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n 
STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") lastEventId\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- 
Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct 
ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes 
cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 
firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: 
optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") 
scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string 
identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution 
workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: 
optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") 
startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional 
TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes 
childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n 10: optional string 
activeClusterName\n 20: optional list clusters\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric\n 60: optional list clusters\n 70: optional string activeClusterName\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional 
string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional list queries\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: 
optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional list queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional 
binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: 
optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse 
{\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType 
resultType\n 20: optional binary answer\n 30: optional string errorReason\n 40: optional binary errorDetails\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n}\n\nstruct PendingChildExecutionInfo {\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least 
one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright 
notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception EntityNotExistsError {\n 1: required string message\n}\n\nexception ServiceBusyError {\n 1: required string message\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskError {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") nextEventId\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string 
supportedVersions\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n 
WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n 
FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") lastEventId\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") 
executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: 
optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional 
CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string 
identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional 
binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct 
ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 
30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") 
initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: 
optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = 
\"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional 
ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 
390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional 
string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n 10: optional string activeClusterName\n 20: optional list clusters\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric\n 60: optional list clusters\n 70: optional string activeClusterName\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration 
replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional list queries\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct 
RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional list queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct 
RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional 
WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: 
optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in 
the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorReason\n 40: optional binary errorDetails\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n}\n\nstruct PendingChildExecutionInfo {\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") 
backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. 
When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}" diff --git a/.gen/go/sqlblobs/sqlblobs.go b/.gen/go/sqlblobs/sqlblobs.go index 5883a72f077..2908b6595cb 100644 --- a/.gen/go/sqlblobs/sqlblobs.go +++ b/.gen/go/sqlblobs/sqlblobs.go @@ -68,6 +68,7 @@ type ActivityInfo struct { RetryNonRetryableErrors []string `json:"retryNonRetryableErrors,omitempty"` RetryLastFailureReason *string `json:"retryLastFailureReason,omitempty"` 
RetryLastWorkerIdentity *string `json:"retryLastWorkerIdentity,omitempty"` + RetryLastFailureDetails []byte `json:"retryLastFailureDetails,omitempty"` } type _List_String_ValueList []string @@ -113,7 +114,7 @@ func (_List_String_ValueList) Close() {} // } func (v *ActivityInfo) ToWire() (wire.Value, error) { var ( - fields [30]wire.Field + fields [31]wire.Field i int = 0 w wire.Value err error @@ -359,6 +360,14 @@ func (v *ActivityInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 68, Value: w} i++ } + if v.RetryLastFailureDetails != nil { + w, err = wire.NewValueBinary(v.RetryLastFailureDetails), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 70, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -696,6 +705,14 @@ func (v *ActivityInfo) FromWire(w wire.Value) error { return err } + } + case 70: + if field.Value.Type() == wire.TBinary { + v.RetryLastFailureDetails, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + } } } @@ -710,7 +727,7 @@ func (v *ActivityInfo) String() string { return "" } - var fields [30]string + var fields [31]string i := 0 if v.Version != nil { fields[i] = fmt.Sprintf("Version: %v", *(v.Version)) @@ -832,6 +849,10 @@ func (v *ActivityInfo) String() string { fields[i] = fmt.Sprintf("RetryLastWorkerIdentity: %v", *(v.RetryLastWorkerIdentity)) i++ } + if v.RetryLastFailureDetails != nil { + fields[i] = fmt.Sprintf("RetryLastFailureDetails: %v", v.RetryLastFailureDetails) + i++ + } return fmt.Sprintf("ActivityInfo{%v}", strings.Join(fields[:i], ", ")) } @@ -1001,6 +1022,9 @@ func (v *ActivityInfo) Equals(rhs *ActivityInfo) bool { if !_String_EqualsPtr(v.RetryLastWorkerIdentity, rhs.RetryLastWorkerIdentity) { return false } + if !((v.RetryLastFailureDetails == nil && rhs.RetryLastFailureDetails == nil) || (v.RetryLastFailureDetails != nil && rhs.RetryLastFailureDetails != nil && bytes.Equal(v.RetryLastFailureDetails, 
rhs.RetryLastFailureDetails))) { + return false + } return true } @@ -1112,6 +1136,9 @@ func (v *ActivityInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v.RetryLastWorkerIdentity != nil { enc.AddString("retryLastWorkerIdentity", *v.RetryLastWorkerIdentity) } + if v.RetryLastFailureDetails != nil { + enc.AddString("retryLastFailureDetails", base64.StdEncoding.EncodeToString(v.RetryLastFailureDetails)) + } return err } @@ -1565,6 +1592,21 @@ func (v *ActivityInfo) IsSetRetryLastWorkerIdentity() bool { return v != nil && v.RetryLastWorkerIdentity != nil } +// GetRetryLastFailureDetails returns the value of RetryLastFailureDetails if it is set or its +// zero value if it is unset. +func (v *ActivityInfo) GetRetryLastFailureDetails() (o []byte) { + if v != nil && v.RetryLastFailureDetails != nil { + return v.RetryLastFailureDetails + } + + return +} + +// IsSetRetryLastFailureDetails returns true if RetryLastFailureDetails is not nil. +func (v *ActivityInfo) IsSetRetryLastFailureDetails() bool { + return v != nil && v.RetryLastFailureDetails != nil +} + type ChildExecutionInfo struct { Version *int64 `json:"version,omitempty"` InitiatedEventBatchID *int64 `json:"initiatedEventBatchID,omitempty"` @@ -10541,11 +10583,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "sqlblobs", Package: "github.com/uber/cadence/.gen/go/sqlblobs", FilePath: "sqlblobs.thrift", - SHA1: "46228861e3b06d499a6a35ae362362f3241ab017", + SHA1: "cf103e145116be6a247e2fc8cc529c698b4ed3a0", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the 
Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n 40: optional map clusterReplicationLevel\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string 
badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") lastEventID\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 40: optional i64 (js.type = \"Long\") currentVersion\n 44: optional i64 (js.type = \"Long\") lastWriteEventID\n 46: optional map lastReplicationInfo\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") 
decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 (js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n 122: optional binary versionHistories\n 124: optional string versionHistoriesEncoding\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: 
optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional string taskList\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 30: optional string domainName\n 32: optional string workflowTypeName\n 35: optional i32 parentClosePolicy\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 
(js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: optional i64 (js.type = \"Long\") createdTimeNanos\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") lastUpdatedNanos\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 32: optional map 
lastReplicationInfo\n 34: optional binary newRunBranchToken\n 36: optional bool resetWorkflow\n}" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n 40: optional map clusterReplicationLevel\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 
retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") lastEventID\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 40: optional i64 (js.type = \"Long\") currentVersion\n 44: optional i64 (js.type = \"Long\") 
lastWriteEventID\n 46: optional map lastReplicationInfo\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 (js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n 
122: optional binary versionHistories\n 124: optional string versionHistoriesEncoding\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional string taskList\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n 70: optional binary retryLastFailureDetails\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 30: optional string domainName\n 32: 
optional string workflowTypeName\n 35: optional i32 parentClosePolicy\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: optional i64 (js.type = \"Long\") createdTimeNanos\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") lastUpdatedNanos\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 
24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 32: optional map lastReplicationInfo\n 34: optional binary newRunBranchToken\n 36: optional bool resetWorkflow\n}" diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index ecd1fd45060..00000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1190 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:e4b30804a381d7603b8a344009987c1ba351c26043501b23b8c7ce21f0b67474" - name = "github.com/BurntSushi/toml" - packages = ["."] - pruneopts = "" - revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" - version = "v0.3.1" - -[[projects]] - digest = "1:82041ab48e5c76da656b723fdc13a2b9ec716cdc736f82adaac77f5c39d4fca8" - name = "github.com/DataDog/zstd" - packages = ["."] - pruneopts = "" - revision = "2347a397da4ee9c6b8226d4aff82c302d0e52773" - version = "v1.4.1" - -[[projects]] - digest = "1:5dd52495eaf9fad11f4742f341166aa9eb68f70061fc1a9b546f9481b284b6d8" - name = "github.com/Shopify/sarama" - packages = ["."] - pruneopts = "" - revision = "46c83074a05474240f9620fb7c70fb0d80ca401a" - version = "v1.23.1" - -[[projects]] - branch = "master" - digest = "1:3c292d5c4fe036d536e3a5945caa056f295e82142f1cbb493cf3d9ec7198e0e3" - name = "github.com/anmitsu/go-shlex" - packages = ["."] - pruneopts = "" - revision = "648efa622239a2f6ff949fed78ee37b48d499ba4" - -[[projects]] - digest = "1:158df1b5f5844347008fa48fc3efc82131d6b67570a31516a04115a67facdabd" - name = "github.com/apache/thrift" - 
packages = ["lib/go/thrift"] - pruneopts = "" - revision = "b2a4d4ae21c789b689dd162deb819665567f481c" - version = "0.10.0" - -[[projects]] - branch = "master" - digest = "1:afaa6de27e2d86b66cf71d55096f00e32b2ef40ec3349b535555aa81c77bc7d3" - name = "github.com/benbjohnson/clock" - packages = ["."] - pruneopts = "" - revision = "7dc76406b6d3c05b5f71a86293cbcf3c4ea03b19" - -[[projects]] - digest = "1:ac2a05be7167c495fe8aaf8aaf62ecf81e78d2180ecb04e16778dc6c185c96a5" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "" - revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" - version = "v1.0.1" - -[[projects]] - digest = "1:6da5545112f73dbad12895d25e39818c1c3e8040ebba488d4d3fe43bc8685eb6" - name = "github.com/bsm/sarama-cluster" - packages = ["."] - pruneopts = "" - revision = "c618e605e15c0d7535f6c96ff8efbb0dba4fd66c" - version = "v2.1.15" - -[[projects]] - digest = "1:47fe89a242ccbae03d31b4c665d3d983786acd316c0d7c51bcfa0d019b205004" - name = "github.com/cactus/go-statsd-client" - packages = ["statsd"] - pruneopts = "" - revision = "138b925ccdf617776955904ba7759fce64406cec" - version = "v3.1.1" - -[[projects]] - branch = "master" - digest = "1:e67b5f68ee2c2d4376973daa8d1589c1e0fc95d0be2d2607dabf2a6a2401239e" - name = "github.com/cch123/elasticsql" - packages = ["."] - pruneopts = "" - revision = "a1a440758eb915b22b160eb43e6aa6c5212b1064" - -[[projects]] - digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:6d8224c46bae831d782f1af734c1bef2765f5ac81307dd8d522d61a9dad5aa1f" - name = "github.com/dgryski/go-farm" - packages = ["."] - pruneopts = "" - revision = "6a90982ecee230ff6cba02d5bd386acc030be9d3" - -[[projects]] - digest = "1:8c9af6e3162383951dee97404fadb761f9ae60b4806998df288a5b143afa7251" - name = 
"github.com/eapache/go-resiliency" - packages = ["breaker"] - pruneopts = "" - revision = "5efd2ed019fd331ec2defc6f3bd98882f1e3e636" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:6643c01e619a68f80ac12ad81223275df653528c6d7e3788291c1fd6f1d622f6" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - pruneopts = "" - revision = "776d5712da21bc4762676d614db1d8a64f4238b0" - -[[projects]] - digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" - name = "github.com/eapache/queue" - packages = ["."] - pruneopts = "" - revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:d99c30df4cfa91354bde67ecedc2a3cb76098c5d88658d5ffb5e1c0f6f6edb96" - name = "github.com/emirpasic/gods" - packages = [ - "containers", - "maps", - "maps/treemap", - "trees", - "trees/redblacktree", - "utils", - ] - pruneopts = "" - revision = "e689965507abbc7225d130c80ca2560291891f8a" - -[[projects]] - branch = "master" - digest = "1:88b37f144a80737f9e5cd50c887c00c2f3c7211257f884b8b80ce97e61ed1ccb" - name = "github.com/facebookgo/clock" - packages = ["."] - pruneopts = "" - revision = "600d898af40aa09a7a93ecb9265d87b0504b6f03" - -[[projects]] - branch = "master" - digest = "1:eeba285fdc8e024e3305d5efd2a85e9e46097172873f9c841ae575527e6affbe" - name = "github.com/fatih/color" - packages = ["."] - pruneopts = "" - revision = "3f9d52f7176a6927daacff70a3e8d1dc2025c53e" - -[[projects]] - digest = "1:f7ad09e1bd153d7de18080ccd5e7bef5ebdba9403aa3cde487f0daf7ef67820d" - name = "github.com/fatih/structtag" - packages = ["."] - pruneopts = "" - revision = "76ae1d6d2117609598c7d4e8f3e938145f204e8f" - version = "v1.0.0" - -[[projects]] - digest = "1:e692d16fdfbddb94e9e4886aaf6c08bdbae5cb4ac80651445de9181b371c6e46" - name = "github.com/go-sql-driver/mysql" - packages = ["."] - pruneopts = "" - revision = "72cd26f257d44c1114970e19afddcd812016007e" - version = "v1.4.1" - -[[projects]] - 
digest = "1:8cd4837a22186b8d2a9a8acd02ce6c31504a1ece1e8d21d9642166b2559be41c" - name = "github.com/gocql/gocql" - packages = [ - ".", - "internal/lru", - "internal/murmur", - "internal/streams", - ] - pruneopts = "" - revision = "56a164ee9f3135e9cfe725a6d25939f24cb2d044" - -[[projects]] - digest = "1:fd53b471edb4c28c7d297f617f4da0d33402755f58d6301e7ca1197ef0a90937" - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "plugin/compare", - "plugin/defaultcheck", - "plugin/description", - "plugin/embedcheck", - "plugin/enumstringer", - "plugin/equal", - "plugin/face", - "plugin/gostring", - "plugin/marshalto", - "plugin/oneofcheck", - "plugin/populate", - "plugin/size", - "plugin/stringer", - "plugin/testgen", - "plugin/union", - "plugin/unmarshal", - "proto", - "protoc-gen-gogo/descriptor", - "protoc-gen-gogo/generator", - "protoc-gen-gogo/generator/internal/remap", - "protoc-gen-gogo/grpc", - "protoc-gen-gogo/plugin", - "protoc-gen-gogoslick", - "vanity", - "vanity/command", - ] - pruneopts = "" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - digest = "1:68c64bb61d55dcd17c82ca0b871ddddb5ae18b30cfe26f6bfd4b6df6287dc2e0" - name = "github.com/golang/mock" - packages = [ - "gomock", - "mockgen", - "mockgen/model", - ] - pruneopts = "" - revision = "9fa652df1129bef0e734c9cf9bf6dbae9ef3b9fa" - version = "1.3.1" - -[[projects]] - digest = "1:b852d2b62be24e445fcdbad9ce3015b44c207815d631230dfce3f14e7803f5bf" - name = "github.com/golang/protobuf" - packages = ["proto"] - pruneopts = "" - revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" - version = "v1.3.2" - -[[projects]] - digest = "1:6a6322a15aa8e99bd156fbba0aae4e5d67b4bb05251d860b348a45dfdcba9cce" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "" - revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" - version = "v0.0.1" - -[[projects]] - digest = "1:ad92aa49f34cbc3546063c7eb2cabb55ee2278b72842eda80e2a20a8a06a8d73" - name = 
"github.com/google/uuid" - packages = ["."] - pruneopts = "" - revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:60b7bc5e043a11213472ae05252527287d20e0a6ccc18f6ae67fad88e41004de" - name = "github.com/hailocab/go-hostpool" - packages = ["."] - pruneopts = "" - revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" - -[[projects]] - digest = "1:0038a7f43b51c8b2a8cd03b5372e73f8eadfe156484c2ae8185ae836f8ebc2cd" - name = "github.com/hashicorp/go-uuid" - packages = ["."] - pruneopts = "" - revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43" - version = "v1.0.1" - -[[projects]] - digest = "1:2f0c811248aeb64978037b357178b1593372439146bda860cb16f2c80785ea93" - name = "github.com/hashicorp/go-version" - packages = ["."] - pruneopts = "" - revision = "ac23dc3fea5d1a983c43f6a0f6e2c13f0195d8bd" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:8b41cd00226c030c2a075230cd7c2030a05f2e3bab0a545ad059565f2c956b3d" - name = "github.com/iancoleman/strcase" - packages = ["."] - pruneopts = "" - revision = "e506e3ef73653e84c592ba44aab577a46678f68c" - -[[projects]] - digest = "1:d45477e90c25c8c6d7d4237281167aa56079382fc042db4b44a8328071649bfa" - name = "github.com/jcmturner/gofork" - packages = [ - "encoding/asn1", - "x/crypto/pbkdf2", - ] - pruneopts = "" - revision = "dc7c13fece037a4a36e2b3c69db4991498d30692" - version = "v1.0.0" - -[[projects]] - digest = "1:ca5c90960520407749b98c49650f54f5f90a667796e85c5ee1597478f702fb91" - name = "github.com/jessevdk/go-flags" - packages = ["."] - pruneopts = "" - revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e" - version = "v1.4.0" - -[[projects]] - branch = "batch" - digest = "1:f2dfbafa7faa143650abef3a721e6f654fec14422c0f3c03f7bec598622c102e" - name = "github.com/jmoiron/sqlx" - packages = [ - ".", - "reflectx", - ] - pruneopts = "" - revision = "fa49b1cf03f78497da59de5455f40a753fa4a31d" - source = "github.com/mfateev/sqlx" - -[[projects]] - 
digest = "1:302ad9379eb146668760df4d779a95379acab43ce5f9a28f27f3273f98232020" - name = "github.com/jonboulle/clockwork" - packages = ["."] - pruneopts = "" - revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" - version = "v0.1.0" - -[[projects]] - digest = "1:e6b65ec5a09e42738d13762852d50a13865d9fccfd29576926e8ef05469388d9" - name = "github.com/kisielk/errcheck" - packages = [ - ".", - "internal/errcheck", - ] - pruneopts = "" - revision = "e14f8d59a22d460d56c5ee92507cd94c78fbf274" - version = "v1.2.0" - -[[projects]] - digest = "1:0f51cee70b0d254dbc93c22666ea2abf211af81c1701a96d04e2284b408621db" - name = "github.com/konsorten/go-windows-terminal-sequences" - packages = ["."] - pruneopts = "" - revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" - version = "v1.0.2" - -[[projects]] - digest = "1:2b62d63ca71f89685c4f41373391319f6b67de7c61bbe7fdf2d89526a72fcb42" - name = "github.com/m3db/prometheus_client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "8ae269d24972b8695572fa6b2e3718b5ea82d6b4" - version = "v0.8.1" - -[[projects]] - digest = "1:954b01b825d4edc1b3e838e9faadfb8f70b308b000425d634d72896a81d3ce9f" - name = "github.com/m3db/prometheus_client_model" - packages = ["go"] - pruneopts = "" - revision = "d3fff8420252ef63bffb96f689d1a85096c97321" - version = "v0.1.0" - -[[projects]] - digest = "1:192c7420ce9083d46de731826d5908a2f571ce1a7be3188428ce388d4f12c16d" - name = "github.com/m3db/prometheus_common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "" - revision = "d550673fc477123acb69017380567e8fafc765fc" - version = "v0.1.0" - -[[projects]] - digest = "1:a944f55a708b66c2eb6705eab880c71b0a053f76b4f3e02d4d47689f7774fb32" - name = "github.com/m3db/prometheus_procfs" - packages = ["."] - pruneopts = "" - revision = "1878d9fbb537119d24b21ca07effd591627cd160" - version = "v0.8.1" - -[[projects]] - branch = "master" - digest = 
"1:101e8200d89dcff2cec2522e427298045bea6d81ef71f8fe10810faeae741efc" - name = "github.com/mailru/easyjson" - packages = [ - ".", - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "" - revision = "b2ccc519800e761ac8000b95e5d57c80a897ff9e" - -[[projects]] - digest = "1:9ea83adf8e96d6304f394d40436f2eb44c1dc3250d223b74088cc253a6cd0a1c" - name = "github.com/mattn/go-colorable" - packages = ["."] - pruneopts = "" - revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" - version = "v0.0.9" - -[[projects]] - digest = "1:dbfae9da5a674236b914e486086671145b37b5e3880a38da906665aede3c9eab" - name = "github.com/mattn/go-isatty" - packages = ["."] - pruneopts = "" - revision = "1311e847b0cb909da63b5fecfb5370aa66236465" - version = "v0.0.8" - -[[projects]] - digest = "1:84c28d9899cc4e00c38042d345cea8819275a5a62403a58530cac67022894776" - name = "github.com/mattn/go-runewidth" - packages = ["."] - pruneopts = "" - revision = "3ee7d812e62a0804a7d0a324e0249ca2db3476d3" - version = "v0.0.4" - -[[projects]] - digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:7a1d2a0cff53707c18698fedd58d5f80e77c22eecf21c140bd529f72cfa62934" - name = "github.com/mmcloughlin/avo" - packages = [ - "attr", - "build", - "buildtags", - "gotypes", - "internal/prnt", - "internal/stack", - "ir", - "operand", - "pass", - "printer", - "reg", - "src", - "x86", - ] - pruneopts = "" - revision = "bb615f61ce85790a1667efc145c66e917cce1a39" - -[[projects]] - branch = "master" - digest = "1:6ad2df2c4d9e1e97e7a087a95a3753128ebad070f65d7ead633578870b06d09d" - name = "github.com/olekukonko/tablewriter" - packages = ["."] - pruneopts = "" - revision = "cc27d85e17cec9768d2ac401ea5d619a9628f16d" - -[[projects]] - digest = 
"1:01eb93f225544756fe9ce5d275679758578c85ebd035f20bc81f445198ac3f59" - name = "github.com/olivere/elastic" - packages = [ - ".", - "config", - "uritemplates", - ] - pruneopts = "" - revision = "8ebe6a0fc23d9d53fbd8890da8ae7ee7cea78dbe" - version = "v6.2.22" - -[[projects]] - digest = "1:1fc4897d3cc482d070651563c16a51489296cd9150e6d53fb7ff4d59a24334bc" - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log", - ] - pruneopts = "" - revision = "659c90643e714681897ec2521c60567dd21da733" - version = "v1.1.0" - -[[projects]] - digest = "1:a5484d4fa43127138ae6e7b2299a6a52ae006c7f803d98d717f60abf3e97192e" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - digest = "1:230e4db8117493b67a1325ac736e78b7db5bc425deaa46a83ee882af934d8c22" - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32", - ] - pruneopts = "" - revision = "8ef35db8296124c4969aab929c16c91c3cb2c8a0" - version = "v2.2.6" - -[[projects]] - digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:6bea0cda3fc62855d5312163e7d259fb97e31692d93c08cfffbeb2d00df0f13c" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "170205fb58decfd011f1550d4cfb737230d7ae4f" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:cd67319ee7536399990c4b00fae07c3413035a53193c644549a676091507cadc" - name = 
"github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "" - revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" - -[[projects]] - digest = "1:0f2cee44695a3208fe5d6926076641499c72304e6f015348c9ab2df90a202cdf" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "" - revision = "31bed53e4047fd6c510e43a941f90cb31be0972a" - version = "v0.6.0" - -[[projects]] - digest = "1:9b33e539d6bf6e4453668a847392d1e9e6345225ea1426f9341212c652bcbee4" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/fs", - ] - pruneopts = "" - revision = "3f98efb27840a48a7a2898ec80be07674d19f9c8" - version = "v0.0.3" - -[[projects]] - branch = "master" - digest = "1:958bea286349ebdb3ec9f9e7c8ab6e283bc4c4ef9b9aea7ed2f1e10b0875b328" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "" - revision = "9beb055b7962d16947a14e1cd718098a2431e20e" - -[[projects]] - digest = "1:6ab228f39a195cb1dab3564a0f27dc24a52bb3a19fa58dd2967f1e7b2482d82b" - name = "github.com/robfig/cron" - packages = ["."] - pruneopts = "" - revision = "b41be1df696709bb6395fe435af20370037c0b4c" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:7f10a9aff574ffa9cc40f41761b96c844becc0332e80bb95275a1bff9ef34040" - name = "github.com/samuel/go-thrift" - packages = ["parser"] - pruneopts = "" - revision = "e8b6b52668fe9c972220addc130edf46a9b466b1" - -[[projects]] - branch = "master" - digest = "1:b6138c1c8fc6ab555e3ee5b030072d962c39e3eb9be15205263e6101b6c5ad52" - name = "github.com/sirupsen/logrus" - packages = ["."] - pruneopts = "" - revision = "07a84ee7412e7a28663d92930a1d46f81b124ee1" - -[[projects]] - digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = 
"1:381bcbeb112a51493d9d998bbba207a529c73dbb49b3fd789e48c63fac1f192c" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - "suite", - ] - pruneopts = "" - revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" - version = "v1.3.0" - -[[projects]] - digest = "1:a3c73ad1e5fd96b7999e4daab8ce66d67295880f5da8106c33b3add6c5a20e92" - name = "github.com/uber-common/bark" - packages = ["."] - pruneopts = "" - revision = "02d883c81a4e7b76904d97efb176efdf4be791bd" - version = "v1.2.1" - -[[projects]] - digest = "1:2b232337b83d0433c03c1b05c55e9bde7562f7134cf50bbbd2696faace7364b1" - name = "github.com/uber-go/kafka-client" - packages = [ - ".", - "internal/consumer", - "internal/list", - "internal/metrics", - "internal/util", - "kafka", - ] - pruneopts = "" - revision = "be56bf4e711245701de362714ee898d1ad621f39" - version = "v0.2.2" - -[[projects]] - digest = "1:861312e1c66a6935183495ad08db511fb7cf434a9da12c49d0be9b7725f3a22b" - name = "github.com/uber-go/mapdecode" - packages = [ - ".", - "internal/mapstructure", - ] - pruneopts = "" - revision = "718b4994083e432669f44a00174c5f1bcdb1434d" - version = "v1.0.0" - -[[projects]] - digest = "1:aaef0733d4f8a38791c7b465fd4ab1a3f9afb6bdbd6bff2712deb803a0bcab1e" - name = "github.com/uber-go/tally" - packages = [ - ".", - "m3", - "m3/customtransports", - "m3/thrift", - "m3/thriftudp", - "prometheus", - "statsd", - "thirdparty/github.com/apache/thrift/lib/go/thrift", - ] - pruneopts = "" - revision = "3332297784e46cd346ab6d9894fd4ea027dc9368" - version = "v3.3.12" - -[[projects]] - digest = "1:86555acbb9507153d3cd0d032e07279ba89e38aadc8200cfca3b5d14c98b4daf" - name = "github.com/uber/ringpop-go" - packages = [ - ".", - "discovery", - "discovery/jsonfile", - "discovery/statichosts", - "events", - "forward", - "hashring", - "logging", - "membership", - "shared", - "swim", - "util", - ] - pruneopts = "" - revision = "08d399785ee54fdae8e4bd8b7b481673f52739cc" - version = "v0.8.5" - -[[projects]] - digest = 
"1:226a83352ebbbf5009b3d767bc998ced618b028055a48b367d9cf357f2f3e714" - name = "github.com/uber/tchannel-go" - packages = [ - ".", - "internal/argreader", - "json", - "raw", - "relay", - "thrift", - "thrift/gen-go/meta", - "thrift/thrift-gen", - "tnet", - "tos", - "trand", - "typed", - ] - pruneopts = "" - revision = "58b01ae38cfff68102223e374c580a40c1d91dda" - version = "v1.14.0" - -[[projects]] - branch = "master" - digest = "1:13a24c4398a86df35169b438a14a81eef3b63037ea57fd609aa291461d1a003b" - name = "github.com/urfave/cli" - packages = ["."] - pruneopts = "" - revision = "93392d12e8cd60a1c7c62dc4bf4bab7a8e001eec" - -[[projects]] - digest = "1:ded68a73f6dcedbf1b90909e00a28417854bfa8fb3a4ca6f12f11960e151214e" - name = "github.com/valyala/fastjson" - packages = [ - ".", - "fastfloat", - ] - pruneopts = "" - revision = "73ca3ce4bcd1cb08f60e48df25675b80c6a4e51e" - version = "v1.4.1" - -[[projects]] - branch = "master" - digest = "1:0870e51ccaa635794d3071e4379f14605188d4d3239bf6a7a4b154796682758d" - name = "github.com/xwb1989/sqlparser" - packages = [ - ".", - "dependency/bytes2", - "dependency/hack", - "dependency/querypb", - "dependency/sqltypes", - ] - pruneopts = "" - revision = "120387863bf27d04bc07db8015110a6e96d0146c" - -[[projects]] - digest = "1:e6ff7840319b6fda979a918a8801005ec2049abca62af19211d96971d8ec3327" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" - -[[projects]] - branch = "master" - digest = "1:7a04aa4482371ededbe3f26410b80c8ab50415050947fa43140c165f7face690" - name = "go.uber.org/cadence" - packages = [ - ".", - ".gen/go/cadence", - ".gen/go/cadence/workflowserviceclient", - ".gen/go/cadence/workflowservicetest", - ".gen/go/shared", - "activity", - "client", - "encoded", - "internal", - "internal/common", - "internal/common/backoff", - "internal/common/cache", - "internal/common/metrics", - "internal/common/util", - "testsuite", - "worker", - 
"workflow", - ] - pruneopts = "" - revision = "1e3023da270d1dc51fbcf8cd47404f3ce4018834" - -[[projects]] - digest = "1:22c7effcb4da0eacb2bb1940ee173fac010e9ef3c691f5de4b524d538bd980f5" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:09c15b6a5ed2f162a2e23662358b07f462918044c8974fbf0a8f30a47c66c4c0" - name = "go.uber.org/net/metrics" - packages = [ - ".", - "bucket", - "push", - "tallypush", - ] - pruneopts = "" - revision = "0327310766f9da183152db859680ae0345c31957" - version = "v1.1.0" - -[[projects]] - digest = "1:5482e2299c1e5fb1954190d95b848ee098c51f548810997eb315ab387b623538" - name = "go.uber.org/thriftrw" - packages = [ - ".", - "ast", - "compile", - "envelope", - "gen", - "idl", - "idl/internal", - "internal/concurrent", - "internal/curry", - "internal/envelope", - "internal/envelope/exception", - "internal/frame", - "internal/goast", - "internal/multiplex", - "internal/plugin", - "internal/plugin/builtin/pluginapigen", - "internal/process", - "internal/semver", - "plugin", - "plugin/api", - "protocol", - "protocol/binary", - "ptr", - "thriftreflect", - "version", - "wire", - ] - pruneopts = "" - revision = "8cb9e79c2118f1eb873714e2d02747d6ecc62954" - version = "v1.20.1" - -[[projects]] - branch = "master" - digest = "1:4f9d5475f0ab52f0dc28c6bfd2cfeebb0a5f823e8ae4cc8bc53ae112762d275c" - name = "go.uber.org/tools" - packages = ["update-license"] - pruneopts = "" - revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98" - -[[projects]] - digest = "1:1309eaa2e3501a519fdc01fd4baa3214eae4612d83eb9758d314033f05c791bb" - name = "go.uber.org/yarpc" - packages = [ - ".", - "api/backoff", - "api/encoding", - "api/middleware", - "api/peer", - "api/transport", - "encoding/thrift", - "encoding/thrift/internal", - "internal", - "internal/backoff", - "internal/bufferpool", - "internal/config", - "internal/digester", - "internal/errorsync", - 
"internal/humanize", - "internal/inboundmiddleware", - "internal/interpolate", - "internal/introspection", - "internal/iopool", - "internal/observability", - "internal/outboundmiddleware", - "internal/request", - "internal/yarpcerrors", - "peer", - "peer/hostport", - "peer/peerlist/v2", - "peer/roundrobin", - "pkg/encoding", - "pkg/errors", - "pkg/lifecycle", - "pkg/procedure", - "transport/tchannel", - "transport/tchannel/internal", - "yarpcconfig", - "yarpcerrors", - ] - pruneopts = "" - revision = "32f8a122a940ef0bd42bef358ea46c7d91b14f4c" - version = "v1.39.0" - -[[projects]] - digest = "1:984e93aca9088b440b894df41f2043b6a3db8f9cf30767032770bfc4796993b0" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "" - revision = "27376062155ad36be76b0f12cf1572a221d3a48c" - version = "v1.10.0" - -[[projects]] - branch = "master" - digest = "1:086760278d762dbb0e9a26e09b57f04c89178c86467d8d94fae47d64c222f328" - name = "golang.org/x/crypto" - packages = [ - "md4", - "pbkdf2", - ] - pruneopts = "" - revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285" - -[[projects]] - branch = "master" - digest = "1:99dc4c94284ccd0d515bc11c906edcc33da407af228c6cf75b6f20e2cd3e6ab6" - name = "golang.org/x/lint" - packages = [ - ".", - "golint", - ] - pruneopts = "" - revision = "959b441ac422379a43da2230f62be024250818b0" - -[[projects]] - branch = "master" - digest = "1:955694a7c42527d7fb188505a22f10b3e158c6c2cf31fe64b1e62c9ab7b18401" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "internal/iana", - "internal/socket", - "internal/socks", - "ipv4", - "ipv6", - "proxy", - ] - pruneopts = "" - revision = "ca1201d0de80cfde86cb01aea620983605dfe99b" - source = "https://github.com/golang/net" - -[[projects]] - branch = "master" - digest = "1:0b5c2207c72f2d13995040f176feb6e3f453d6b01af2b9d57df76b05ded2e926" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - 
pruneopts = "" - revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d" - -[[projects]] - branch = "master" - digest = "1:9522af4be529c108010f95b05f1022cb872f2b9ff8b101080f554245673466e1" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "" - revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" - -[[projects]] - branch = "master" - digest = "1:761bb6ea1becb2ac789e5931c1c413cc063b14bc6f6dabe78cc0df9fa819bae6" - name = "golang.org/x/tools" - packages = [ - "cmd/stringer", - "go/analysis", - "go/analysis/passes/inspect", - "go/ast/astutil", - "go/ast/inspector", - "go/buildutil", - "go/gcexportdata", - "go/internal/gcimporter", - "go/internal/packagesdriver", - "go/packages", - "go/types/objectpath", - "go/types/typeutil", - "internal/fastwalk", - "internal/gopathwalk", - "internal/semver", - ] - pruneopts = "" - revision = "88ddfcebc769cb7884c38d144ee893cfb4519053" - -[[projects]] - digest = "1:47f391ee443f578f01168347818cb234ed819521e49e4d2c8dd2fb80d48ee41a" - name = "google.golang.org/appengine" - packages = ["cloudsql"] - pruneopts = "" - revision = "b2f4a3cf3c67576a2ee09e1fe62656a5086ce880" - version = "v1.6.1" - -[[projects]] - digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - digest = "1:4777ba481cc12866b89aafb0a67529e7ac48b9aea06a25f3737b2cf5a3ffda12" - name = "gopkg.in/jcmturner/aescts.v1" - packages = ["."] - pruneopts = "" - revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290" - version = "v1.0.1" - -[[projects]] - digest = "1:84c5b1392ef65ad1bb64da4b4d0beb2f204eefc769d6d96082347bb7057cb7b1" - name = "gopkg.in/jcmturner/dnsutils.v1" - packages = ["."] - pruneopts = "" - revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c" - version = "v1.0.1" - -[[projects]] - digest = "1:502ab576ba8c47c4de77fe3f2b2386adc1a1447bb5afae2ac7bf0edd2b6f7c52" - name = 
"gopkg.in/jcmturner/gokrb5.v7" - packages = [ - "asn1tools", - "client", - "config", - "credentials", - "crypto", - "crypto/common", - "crypto/etype", - "crypto/rfc3961", - "crypto/rfc3962", - "crypto/rfc4757", - "crypto/rfc8009", - "gssapi", - "iana", - "iana/addrtype", - "iana/adtype", - "iana/asnAppTag", - "iana/chksumtype", - "iana/errorcode", - "iana/etypeID", - "iana/flags", - "iana/keyusage", - "iana/msgtype", - "iana/nametype", - "iana/patype", - "kadmin", - "keytab", - "krberror", - "messages", - "pac", - "types", - ] - pruneopts = "" - revision = "363118e62befa8a14ff01031c025026077fe5d6d" - version = "v7.3.0" - -[[projects]] - digest = "1:f9956ccc103c6208cd50c71ee5191b6fdcc635972c12624ef949c9b20b2bb9d1" - name = "gopkg.in/jcmturner/rpc.v1" - packages = [ - "mstypes", - "ndr", - ] - pruneopts = "" - revision = "99a8ce2fbf8b8087b6ed12a37c61b10f04070043" - version = "v1.1.0" - -[[projects]] - branch = "v2" - digest = "1:bf922c860f64bb6372daa82bb9c1e8ed144e97ac7587efa80d08b4e98737e9ea" - name = "gopkg.in/validator.v2" - packages = ["."] - pruneopts = "" - revision = "135c24b11c19e52befcae2ec3fca5d9b78c4e98e" - -[[projects]] - digest = "1:cedccf16b71e86db87a24f8d4c70b0a855872eb967cb906a66b95de56aefbd0d" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" - -[[projects]] - digest = "1:c296d0de2e701b19a8fc5dc761af55a00dbefa4467e70c2f8cb12f289bf2eb5a" - name = "honnef.co/go/tools" - packages = [ - "arg", - "cmd/staticcheck", - "config", - "deprecated", - "facts", - "functions", - "go/types/typeutil", - "internal/cache", - "internal/passes/buildssa", - "internal/renameio", - "internal/sharedcheck", - "lint", - "lint/lintdsl", - "lint/lintutil", - "lint/lintutil/format", - "loader", - "printf", - "simple", - "ssa", - "ssautil", - "staticcheck", - "staticcheck/vrp", - "stylecheck", - "unused", - "version", - ] - pruneopts = "" - revision = 
"72554cb117ad340748b3093e7108983fd984c9f2" - version = "2019.2.2" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/Shopify/sarama", - "github.com/apache/thrift/lib/go/thrift", - "github.com/bsm/sarama-cluster", - "github.com/cactus/go-statsd-client/statsd", - "github.com/cch123/elasticsql", - "github.com/davecgh/go-spew/spew", - "github.com/dgryski/go-farm", - "github.com/emirpasic/gods/maps/treemap", - "github.com/fatih/color", - "github.com/go-sql-driver/mysql", - "github.com/gocql/gocql", - "github.com/golang/mock/gomock", - "github.com/google/uuid", - "github.com/hashicorp/go-version", - "github.com/iancoleman/strcase", - "github.com/jmoiron/sqlx", - "github.com/jonboulle/clockwork", - "github.com/m3db/prometheus_client_golang/prometheus", - "github.com/olekukonko/tablewriter", - "github.com/olivere/elastic", - "github.com/opentracing/opentracing-go", - "github.com/pborman/uuid", - "github.com/robfig/cron", - "github.com/sirupsen/logrus", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/stretchr/testify/suite", - "github.com/uber-go/kafka-client", - "github.com/uber-go/kafka-client/kafka", - "github.com/uber-go/tally", - "github.com/uber-go/tally/m3", - "github.com/uber-go/tally/prometheus", - "github.com/uber-go/tally/statsd", - "github.com/uber/ringpop-go", - "github.com/uber/ringpop-go/discovery", - "github.com/uber/ringpop-go/discovery/jsonfile", - "github.com/uber/ringpop-go/discovery/statichosts", - "github.com/uber/ringpop-go/events", - "github.com/uber/ringpop-go/hashring", - "github.com/uber/ringpop-go/swim", - "github.com/uber/tchannel-go", - "github.com/urfave/cli", - "github.com/valyala/fastjson", - "github.com/xwb1989/sqlparser", - "go.uber.org/atomic", - "go.uber.org/cadence", - "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient", - "go.uber.org/cadence/.gen/go/cadence/workflowservicetest", - 
"go.uber.org/cadence/.gen/go/shared", - "go.uber.org/cadence/activity", - "go.uber.org/cadence/client", - "go.uber.org/cadence/encoded", - "go.uber.org/cadence/testsuite", - "go.uber.org/cadence/worker", - "go.uber.org/cadence/workflow", - "go.uber.org/multierr", - "go.uber.org/thriftrw/protocol", - "go.uber.org/thriftrw/thriftreflect", - "go.uber.org/thriftrw/wire", - "go.uber.org/yarpc", - "go.uber.org/yarpc/api/peer", - "go.uber.org/yarpc/api/transport", - "go.uber.org/yarpc/encoding/thrift", - "go.uber.org/yarpc/peer/roundrobin", - "go.uber.org/yarpc/transport/tchannel", - "go.uber.org/yarpc/yarpcerrors", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - "golang.org/x/net/context", - "golang.org/x/time/rate", - "gopkg.in/validator.v2", - "gopkg.in/yaml.v2", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 8aacfad2948..00000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,156 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - -ignored = ["github.com/uber/cadence/.gen"] - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "1.17.0" - -[[constraint]] - name = "github.com/apache/thrift" - version = "0.10.0" - -[[constraint]] - name = "github.com/jonboulle/clockwork" - version = "0.1.0" - -[[constraint]] - name = "github.com/cactus/go-statsd-client" - version = "3.1.1" - -[[constraint]] - name = "github.com/davecgh/go-spew" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/dgryski/go-farm" - -[[constraint]] - branch = "master" - name = "github.com/emirpasic/gods" - -[[constraint]] - branch = "master" - name = "github.com/fatih/color" - -[[constraint]] - name = "github.com/gocql/gocql" - revision = "56a164ee9f3135e9cfe725a6d25939f24cb2d044" - -[[constraint]] - name = "github.com/golang/mock" - version = "1.1.1" - -[[constraint]] - branch = "master" - name = "github.com/olekukonko/tablewriter" - -[[constraint]] - name = "github.com/pborman/uuid" - version = "1.0.0" - -[[constraint]] - name = "github.com/google/uuid" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/sirupsen/logrus" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.2" - -[[constraint]] - name = "github.com/uber-go/kafka-client" - version = "0.2.0" - -[[constraint]] - name = "github.com/uber-go/tally" - version = "3.3.9" - -[[constraint]] - name = "github.com/uber/ringpop-go" - version = "0.8.0" - -[[constraint]] - name = "github.com/uber/tchannel-go" - version = 
"1.0.0" - -[[constraint]] - branch = "master" - name = "github.com/urfave/cli" - -[[constraint]] - name = "go.uber.org/atomic" - version = "1.3.2" - -[[constraint]] - name = "go.uber.org/cadence" - branch = "master" - -[[constraint]] - name = "go.uber.org/yarpc" - version = "1.7.1" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.9.0" - -[[constraint]] - name = "golang.org/x/net" - source = "https://github.com/golang/net" - -[[constraint]] - branch = "master" - name = "golang.org/x/time" - -[[constraint]] - name = "go.uber.org/thriftrw" - version = "1.6.0" - -[[constraint]] - branch = "batch" - name = "github.com/jmoiron/sqlx" - source = "github.com/mfateev/sqlx" - -[[constraint]] - name = "github.com/robfig/cron" - version = "1.1.0" - -[[constraint]] - name = "github.com/hashicorp/go-version" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/cch123/elasticsql" - -[[constraint]] - name = "github.com/valyala/fastjson" - version = "1.4.1" diff --git a/Makefile b/Makefile index 07786028ca5..dae0573f8f5 100644 --- a/Makefile +++ b/Makefile @@ -189,13 +189,13 @@ cover_xdc_profile: clean bins_nothrift $(COVER_ROOT)/cover.out: $(UNIT_COVER_FILE) $(INTEG_CASS_COVER_FILE) $(INTEG_CASS_EV2_COVER_FILE) $(INTEG_XDC_CASS_COVER_FILE) $(INTEG_SQL_COVER_FILE) $(INTEG_SQL_EV2_COVER_FILE) $(INTEG_XDC_SQL_COVER_FILE) @echo "mode: atomic" > $(COVER_ROOT)/cover.out - cat $(UNIT_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out - cat $(INTEG_CASS_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out - cat $(INTEG_CASS_EV2_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out - cat $(INTEG_XDC_CASS_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out - cat $(INTEG_SQL_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out - cat $(INTEG_SQL_EV2_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> 
$(COVER_ROOT)/cover.out - cat $(INTEG_XDC_SQL_COVER_FILE) | grep -v "^mode: \w\+" | grep -v ".gen" >> $(COVER_ROOT)/cover.out + cat $(UNIT_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_CASS_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_CASS_EV2_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_XDC_CASS_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_SQL_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_SQL_EV2_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out + cat $(INTEG_XDC_SQL_COVER_FILE) | grep -v "^mode: \w\+" | grep -vP ".gen|[Mm]ock[s]?" >> $(COVER_ROOT)/cover.out cover: $(COVER_ROOT)/cover.out go tool cover -html=$(COVER_ROOT)/cover.out; diff --git a/client/clientBean.go b/client/clientBean.go index 2d9887f7fe1..e9bd9c65b5e 100644 --- a/client/clientBean.go +++ b/client/clientBean.go @@ -102,14 +102,9 @@ func NewClientBean(factory Factory, dispatcherProvider DispatcherProvider, clust return nil, err } - frontendClient, err := factory.NewFrontendClient() - if err != nil { - return nil, err - } - remoteAdminClients := map[string]admin.Client{} remoteFrontendClients := map[string]frontend.Client{} - for cluster, info := range clusterMetadata.GetAllClusterInfo() { + for clusterName, info := range clusterMetadata.GetAllClusterInfo() { dispatcher, err := dispatcherProvider.Get(info.RPCName, info.RPCAddress) if err != nil { return nil, err @@ -124,7 +119,7 @@ func NewClientBean(factory Factory, dispatcherProvider DispatcherProvider, clust return nil, err } - frontendclient, err := factory.NewFrontendClientWithTimeoutAndDispatcher( + frontendClient, err := factory.NewFrontendClientWithTimeoutAndDispatcher( info.RPCName, 
frontend.DefaultTimeout, frontend.DefaultLongPollTimeout, @@ -134,14 +129,14 @@ func NewClientBean(factory Factory, dispatcherProvider DispatcherProvider, clust return nil, err } - remoteAdminClients[cluster] = adminClient - remoteFrontendClients[cluster] = frontendclient + remoteAdminClients[clusterName] = adminClient + remoteFrontendClients[clusterName] = frontendClient } return &clientBeanImpl{ factory: factory, historyClient: historyClient, - frontendClient: frontendClient, + frontendClient: remoteFrontendClients[clusterMetadata.GetCurrentClusterName()], remoteAdminClients: remoteAdminClients, remoteFrontendClients: remoteFrontendClients, }, nil diff --git a/common/archiver/filestore/queryParser.go b/common/archiver/filestore/queryParser.go index 92765612964..da498542198 100644 --- a/common/archiver/filestore/queryParser.go +++ b/common/archiver/filestore/queryParser.go @@ -24,8 +24,14 @@ package filestore import ( "errors" + "fmt" + "strconv" + "strings" + "time" "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/xwb1989/sqlparser" ) type ( @@ -43,14 +49,220 @@ type ( runID *string workflowTypeName *string closeStatus *shared.WorkflowExecutionCloseStatus + emptyResult bool } ) +// All allowed fields for filtering +const ( + WorkflowID = "WorkflowID" + RunID = "RunID" + WorkflowType = "WorkflowType" + CloseTime = "CloseTime" + CloseStatus = "CloseStatus" +) + +const ( + queryTemplate = "select * from dummy where %s" + + defaultDateTimeFormat = time.RFC3339 +) + // NewQueryParser creates a new query parser for filestore func NewQueryParser() QueryParser { return &queryParser{} } func (p *queryParser) Parse(query string) (*parsedQuery, error) { - return nil, errors.New("method not implemented") + stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + if err != nil { + return nil, err + } + whereExpr := stmt.(*sqlparser.Select).Where.Expr + parsedQuery := &parsedQuery{ + earliestCloseTime: 0, + latestCloseTime: 
time.Now().UnixNano(), + } + if err := p.convertWhereExpr(whereExpr, parsedQuery); err != nil { + return nil, err + } + return parsedQuery, nil +} + +func (p *queryParser) convertWhereExpr(expr sqlparser.Expr, parsedQuery *parsedQuery) error { + if expr == nil { + return errors.New("where expression is nil") + } + + switch expr.(type) { + case *sqlparser.ComparisonExpr: + return p.convertComparisonExpr(expr.(*sqlparser.ComparisonExpr), parsedQuery) + case *sqlparser.AndExpr: + return p.convertAndExpr(expr.(*sqlparser.AndExpr), parsedQuery) + case *sqlparser.ParenExpr: + return p.convertParenExpr(expr.(*sqlparser.ParenExpr), parsedQuery) + default: + return errors.New("only comparsion and \"and\" expression is supported") + } +} + +func (p *queryParser) convertParenExpr(parenExpr *sqlparser.ParenExpr, parsedQuery *parsedQuery) error { + return p.convertWhereExpr(parenExpr.Expr, parsedQuery) +} + +func (p *queryParser) convertAndExpr(andExpr *sqlparser.AndExpr, parsedQuery *parsedQuery) error { + if err := p.convertWhereExpr(andExpr.Left, parsedQuery); err != nil { + return err + } + if err := p.convertWhereExpr(andExpr.Right, parsedQuery); err != nil { + return err + } + return nil +} + +func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery *parsedQuery) error { + colName, ok := compExpr.Left.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("invalid filter name: %s", sqlparser.String(compExpr.Left)) + } + colNameStr := sqlparser.String(colName) + op := compExpr.Operator + valExpr, ok := compExpr.Right.(*sqlparser.SQLVal) + if !ok { + return fmt.Errorf("invalid value: %s", sqlparser.String(compExpr.Right)) + } + valStr := sqlparser.String(valExpr) + + switch colNameStr { + case WorkflowID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowID) + } + if parsedQuery.workflowID != nil && *parsedQuery.workflowID != val { + 
parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowID = common.StringPtr(val) + case RunID: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", RunID) + } + if parsedQuery.runID != nil && *parsedQuery.runID != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.runID = common.StringPtr(val) + case WorkflowType: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", WorkflowType) + } + if parsedQuery.workflowTypeName != nil && *parsedQuery.workflowTypeName != val { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.workflowTypeName = common.StringPtr(val) + case CloseStatus: + val, err := extractStringValue(valStr) + if err != nil { + return err + } + if op != "=" { + return fmt.Errorf("only operation = is support for %s", CloseStatus) + } + status, err := convertStatusStr(val) + if err != nil { + return err + } + if parsedQuery.closeStatus != nil && *parsedQuery.closeStatus != status { + parsedQuery.emptyResult = true + return nil + } + parsedQuery.closeStatus = status.Ptr() + case CloseTime: + timestamp, err := convertToTimestamp(valStr) + if err != nil { + return err + } + return p.convertCloseTime(timestamp, op, parsedQuery) + default: + return fmt.Errorf("unknown filter name: %s", colNameStr) + } + + return nil +} + +func (p *queryParser) convertCloseTime(timestamp int64, op string, parsedQuery *parsedQuery) error { + switch op { + case "=": + if err := p.convertCloseTime(timestamp, ">=", parsedQuery); err != nil { + return err + } + if err := p.convertCloseTime(timestamp, "<=", parsedQuery); err != nil { + return err + } + case "<": + parsedQuery.latestCloseTime = common.MinInt64(parsedQuery.latestCloseTime, timestamp-1) + case "<=": + parsedQuery.latestCloseTime = common.MinInt64(parsedQuery.latestCloseTime, timestamp) 
+ case ">": + parsedQuery.earliestCloseTime = common.MaxInt64(parsedQuery.earliestCloseTime, timestamp+1) + case ">=": + parsedQuery.earliestCloseTime = common.MaxInt64(parsedQuery.earliestCloseTime, timestamp) + default: + return fmt.Errorf("operator %s is not supported for close time", op) + } + return nil +} + +func convertToTimestamp(timeStr string) (int64, error) { + timestamp, err := strconv.ParseInt(timeStr, 10, 64) + if err == nil { + return timestamp, nil + } + timestampStr, err := extractStringValue(timeStr) + if err != nil { + return 0, err + } + parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) + if err != nil { + return 0, err + } + return parsedTime.UnixNano(), nil +} + +func convertStatusStr(statusStr string) (shared.WorkflowExecutionCloseStatus, error) { + statusStr = strings.ToLower(statusStr) + switch statusStr { + case "completed": + return shared.WorkflowExecutionCloseStatusCompleted, nil + case "failed": + return shared.WorkflowExecutionCloseStatusFailed, nil + case "canceled": + return shared.WorkflowExecutionCloseStatusCanceled, nil + case "continuedasnew": + return shared.WorkflowExecutionCloseStatusContinuedAsNew, nil + case "timedout": + return shared.WorkflowExecutionCloseStatusTimedOut, nil + default: + return 0, fmt.Errorf("unknown workflow close status: %s", statusStr) + } +} + +func extractStringValue(s string) (string, error) { + if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { + return s[1 : len(s)-1], nil + } + return "", fmt.Errorf("value %s is not a string value", s) } diff --git a/common/archiver/filestore/queryParser_test.go b/common/archiver/filestore/queryParser_test.go new file mode 100644 index 00000000000..346f4bb6b41 --- /dev/null +++ b/common/archiver/filestore/queryParser_test.go @@ -0,0 +1,322 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package filestore + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" +) + +type queryParserSuite struct { + *require.Assertions + suite.Suite + + parser QueryParser +} + +func TestQueryParserSuite(t *testing.T) { + suite.Run(t, new(queryParserSuite)) +} + +func (s *queryParserSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.parser = NewQueryParser() +} + +func (s *queryParserSuite) TestParseWorkflowID_RunID_WorkflowType() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "WorkflowID = \"random workflowID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: common.StringPtr("random workflowID"), + }, + }, + { + query: "WorkflowID = \"random workflowID\" and WorkflowID = \"random workflowID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: common.StringPtr("random workflowID"), + }, + }, + { + query: "RunID = \"random runID\"", + expectErr: false, + parsedQuery: &parsedQuery{ + runID: common.StringPtr("random runID"), + }, + }, + { + query: "WorkflowType = \"random typeName\"", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowTypeName: common.StringPtr("random typeName"), + }, + }, + { + query: "WorkflowID = 'random workflowID'", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: common.StringPtr("random workflowID"), + }, + }, + { + query: "WorkflowType = 'random typeName' and WorkflowType = \"another typeName\"", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + { + query: "WorkflowType = 'random typeName' and (WorkflowID = \"random workflowID\" and RunID='random runID')", + expectErr: false, + parsedQuery: &parsedQuery{ + workflowID: common.StringPtr("random workflowID"), + runID: common.StringPtr("random runID"), + workflowTypeName: common.StringPtr("random typeName"), + }, + }, + { 
+ query: "runID = random workflowID", + expectErr: true, + }, + { + query: "WorkflowID = \"random workflowID\" or WorkflowID = \"another workflowID\"", + expectErr: true, + }, + { + query: "WorkflowID = \"random workflowID\" or runID = \"random runID\"", + expectErr: true, + }, + { + query: "workflowid = \"random workflowID\"", + expectErr: true, + }, + { + query: "runID > \"random workflowID\"", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery.workflowID, parsedQuery.workflowID) + s.Equal(tc.parsedQuery.runID, parsedQuery.runID) + s.Equal(tc.parsedQuery.workflowTypeName, parsedQuery.workflowTypeName) + } + } +} + +func (s *queryParserSuite) TestParseCloseStatus() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "CloseStatus = \"Completed\"", + expectErr: false, + parsedQuery: &parsedQuery{ + closeStatus: shared.WorkflowExecutionCloseStatusCompleted.Ptr(), + }, + }, + { + query: "CloseStatus = 'continuedasnew'", + expectErr: false, + parsedQuery: &parsedQuery{ + closeStatus: shared.WorkflowExecutionCloseStatusContinuedAsNew.Ptr(), + }, + }, + { + query: "CloseStatus = 'Failed' and CloseStatus = \"Failed\"", + expectErr: false, + parsedQuery: &parsedQuery{ + closeStatus: shared.WorkflowExecutionCloseStatusFailed.Ptr(), + }, + }, + { + query: "(CloseStatus = 'Timedout' and CloseStatus = \"canceled\")", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + { + query: "closeStatus = \"Failed\"", + expectErr: true, + }, + { + query: "CloseStatus = \"Failed\" or CloseStatus = \"Failed\"", + expectErr: true, + }, + { + query: "CloseStatus = \"unknown\"", + expectErr: true, + }, + { + query: "CloseStatus > \"Failed\"", + expectErr: true, + }, + 
} + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery.closeStatus, parsedQuery.closeStatus) + } + } +} + +func (s *queryParserSuite) TestParseCloseTime() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "CloseTime <= 1000", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 0, + latestCloseTime: 1000, + }, + }, + { + query: "CloseTime < 2000 and CloseTime <= 1000 and CloseTime > 300", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 301, + latestCloseTime: 1000, + }, + }, + { + query: "CloseTime = 2000 and (CloseTime > 1000 and CloseTime <= 9999)", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 2000, + latestCloseTime: 2000, + }, + }, + { + query: "CloseTime <= \"2019-01-01T11:11:11Z\" and CloseTime >= 1000000", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 1000000, + latestCloseTime: 1546341071000000000, + }, + }, + { + query: "closeTime = 2000", + expectErr: true, + }, + { + query: "CloseTime > \"2019-01-01 00:00:00\"", + expectErr: true, + }, + { + query: "CloseStatus > 2000 or CloseStatus < 1000", + expectErr: true, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery.earliestCloseTime, parsedQuery.earliestCloseTime) + s.Equal(tc.parsedQuery.latestCloseTime, parsedQuery.latestCloseTime) + } + } +} + +func (s *queryParserSuite) TestParse() { + testCases := []struct { + query string + expectErr bool + parsedQuery *parsedQuery + }{ + { + query: "CloseTime <= \"2019-01-01T11:11:11Z\" 
and WorkflowID = 'random workflowID'", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 0, + latestCloseTime: 1546341071000000000, + workflowID: common.StringPtr("random workflowID"), + }, + }, + { + query: "CloseTime > 1999 and CloseTime < 10000 and RunID = 'random runID' and CloseStatus = 'Failed'", + expectErr: false, + parsedQuery: &parsedQuery{ + earliestCloseTime: 2000, + latestCloseTime: 9999, + runID: common.StringPtr("random runID"), + closeStatus: shared.WorkflowExecutionCloseStatusFailed.Ptr(), + }, + }, + { + query: "CloseTime > 2001 and CloseTime < 10000 and (RunID = 'random runID') and CloseStatus = 'Failed' and (RunID = 'another ID')", + expectErr: false, + parsedQuery: &parsedQuery{ + emptyResult: true, + }, + }, + } + + for _, tc := range testCases { + parsedQuery, err := s.parser.Parse(tc.query) + if tc.expectErr { + s.Error(err) + continue + } + s.NoError(err) + s.Equal(tc.parsedQuery.emptyResult, parsedQuery.emptyResult) + if !tc.parsedQuery.emptyResult { + s.Equal(tc.parsedQuery, parsedQuery) + } + } +} diff --git a/common/archiver/filestore/visibilityArchiver.go b/common/archiver/filestore/visibilityArchiver.go index 51848ec5be4..6e9616c0f8b 100644 --- a/common/archiver/filestore/visibilityArchiver.go +++ b/common/archiver/filestore/visibilityArchiver.go @@ -151,6 +151,10 @@ func (v *visibilityArchiver) Query( return nil, &shared.BadRequestError{Message: err.Error()} } + if parsedQuery.emptyResult { + return &archiver.QueryVisibilityResponse{}, nil + } + return v.query(ctx, URI, &queryVisibilityRequest{ domainID: request.DomainID, pageSize: request.PageSize, diff --git a/common/metrics/defs.go b/common/metrics/defs.go index d861a2791bc..c4e2a2ab9d7 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -898,6 +898,10 @@ const ( TaskListScavengerScope // BatcherScope is scope used by all metrics emitted by worker.Batcher module BatcherScope + // HistoryScavengerScope is scope used by all metrics emitted by 
worker.history.Scavenger module + HistoryScavengerScope + // ParentClosePolicyProcessorScope is scope used by all metrics emitted by worker.ParentClosePolicyProcessor + ParentClosePolicyProcessorScope NumWorkerScopes ) @@ -1291,7 +1295,9 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ ArchiverPumpScope: {operation: "ArchiverPump"}, ArchiverArchivalWorkflowScope: {operation: "ArchiverArchivalWorkflow"}, TaskListScavengerScope: {operation: "tasklistscavenger"}, + HistoryScavengerScope: {operation: "historyscavenger"}, BatcherScope: {operation: "batcher"}, + ParentClosePolicyProcessorScope: {operation: "ParentClosePolicyProcessor"}, }, } @@ -1595,6 +1601,12 @@ const ( ExecutorTasksDroppedCount BatcherProcessorSuccess BatcherProcessorFailures + HistoryScavengerSuccessCount + HistoryScavengerErrorCount + HistoryScavengerSkipCount + ParentClosePolicyProcessorSuccess + ParentClosePolicyProcessorFailures + NumWorkerMetrics ) @@ -1876,6 +1888,11 @@ var MetricDefs = map[ServiceIdx]map[int]metricDefinition{ ExecutorTasksDroppedCount: {metricName: "executor_dropped", metricType: Counter}, BatcherProcessorSuccess: {metricName: "batcher_processor_requests", metricType: Counter}, BatcherProcessorFailures: {metricName: "batcher_processor_errors", metricType: Counter}, + HistoryScavengerSuccessCount: {metricName: "scavenger_success", metricType: Counter}, + HistoryScavengerErrorCount: {metricName: "scavenger_errors", metricType: Counter}, + HistoryScavengerSkipCount: {metricName: "scavenger_skips", metricType: Counter}, + ParentClosePolicyProcessorSuccess: {metricName: "parent_close_policy_processor_requests", metricType: Counter}, + ParentClosePolicyProcessorFailures: {metricName: "parent_close_policy_processor_errors", metricType: Counter}, }, } diff --git a/common/persistence/cassandra/cassandraPersistence.go b/common/persistence/cassandra/cassandraPersistence.go index bc48400aa46..80712aae054 100644 --- a/common/persistence/cassandra/cassandraPersistence.go +++ 
b/common/persistence/cassandra/cassandraPersistence.go @@ -260,6 +260,7 @@ const ( `non_retriable_errors: ?, ` + `last_failure_reason: ?, ` + `last_worker_identity: ?, ` + + `last_failure_details: ?, ` + `event_data_encoding: ?` + `}` diff --git a/common/persistence/cassandra/cassandraPersistenceUtil.go b/common/persistence/cassandra/cassandraPersistenceUtil.go index 80bd5909cf1..633f53ad198 100644 --- a/common/persistence/cassandra/cassandraPersistenceUtil.go +++ b/common/persistence/cassandra/cassandraPersistenceUtil.go @@ -1310,6 +1310,7 @@ func updateActivityInfos( a.NonRetriableErrors, a.LastFailureReason, a.LastWorkerIdentity, + a.LastFailureDetails, scheduleEncoding, shardID, rowTypeExecution, @@ -2128,6 +2129,8 @@ func createActivityInfo( info.LastFailureReason = v.(string) case "last_worker_identity": info.LastWorkerIdentity = v.(string) + case "last_failure_details": + info.LastFailureDetails = v.([]byte) case "event_data_encoding": sharedEncoding = common.EncodingType(v.(string)) } @@ -2296,6 +2299,7 @@ func resetActivityInfoMap( aInfo["non_retriable_errors"] = a.NonRetriableErrors aInfo["last_failure_reason"] = a.LastFailureReason aInfo["last_worker_identity"] = a.LastWorkerIdentity + aInfo["last_failure_details"] = a.LastFailureDetails aMap[a.ScheduleID] = aInfo } diff --git a/common/persistence/dataInterfaces.go b/common/persistence/dataInterfaces.go index 26f6946ad23..1aedc51e906 100644 --- a/common/persistence/dataInterfaces.go +++ b/common/persistence/dataInterfaces.go @@ -22,6 +22,7 @@ package persistence import ( "fmt" + "strings" "time" "github.com/pborman/uuid" @@ -179,6 +180,8 @@ const ( invalidStateTransitionMsg = "unable to change workflow state from %v to %v, close status %v" ) +const numItemsInGarbageInfo = 3 + type ( // InvalidPersistenceRequestError represents invalid request to persistence InvalidPersistenceRequestError struct { @@ -669,6 +672,7 @@ type ( NonRetriableErrors []string LastFailureReason string LastWorkerIdentity string + 
LastFailureDetails []byte // Not written to database - This is used only for deduping heartbeat timer creation LastHeartbeatTimeoutVisibility int64 } @@ -2461,6 +2465,20 @@ func NewHistoryBranchToken(treeID string) ([]byte, error) { return token, nil } +// NewHistoryBranchTokenByBranchID return a new branch token with treeID/branchID +func NewHistoryBranchTokenByBranchID(treeID, branchID string) ([]byte, error) { + bi := &workflow.HistoryBranch{ + TreeID: &treeID, + BranchID: &branchID, + Ancestors: []*workflow.HistoryBranchRange{}, + } + token, err := internalThriftEncoder.Encode(bi) + if err != nil { + return nil, err + } + return token, nil +} + // NewHistoryBranchTokenFromAnother make up a branchToken func NewHistoryBranchTokenFromAnother(branchID string, anotherToken []byte) ([]byte, error) { var branch workflow.HistoryBranch @@ -2480,3 +2498,22 @@ func NewHistoryBranchTokenFromAnother(branchID string, anotherToken []byte) ([]b } return token, nil } + +// BuildHistoryGarbageCleanupInfo combine the workflow identity information into a string +func BuildHistoryGarbageCleanupInfo(domainID, workflowID, runID string) string { + return fmt.Sprintf("%v:%v:%v", domainID, workflowID, runID) +} + +// SplitHistoryGarbageCleanupInfo returns workflow identity information +func SplitHistoryGarbageCleanupInfo(info string) (domainID, workflowID, runID string, err error) { + ss := strings.Split(info, ":") + // workflowID can contain ":" so len(ss) can be greater than 3 + if len(ss) < numItemsInGarbageInfo { + return "", "", "", fmt.Errorf("not able to split info for %s", info) + } + domainID = ss[0] + runID = ss[len(ss)-1] + workflowEnd := len(info) - len(runID) - 1 + workflowID = info[len(domainID)+1 : workflowEnd] + return +} diff --git a/common/persistence/executionStore.go b/common/persistence/executionStore.go index b3eb5953d18..e0f74eb6de5 100644 --- a/common/persistence/executionStore.go +++ b/common/persistence/executionStore.go @@ -292,6 +292,7 @@ func (m 
*executionManagerImpl) DeserializeActivityInfos( NonRetriableErrors: v.NonRetriableErrors, LastFailureReason: v.LastFailureReason, LastWorkerIdentity: v.LastWorkerIdentity, + LastFailureDetails: v.LastFailureDetails, LastHeartbeatTimeoutVisibility: v.LastHeartbeatTimeoutVisibility, } newInfos[k] = a @@ -411,6 +412,7 @@ func (m *executionManagerImpl) SerializeUpsertActivityInfos( NonRetriableErrors: v.NonRetriableErrors, LastFailureReason: v.LastFailureReason, LastWorkerIdentity: v.LastWorkerIdentity, + LastFailureDetails: v.LastFailureDetails, LastHeartbeatTimeoutVisibility: v.LastHeartbeatTimeoutVisibility, } newInfos = append(newInfos, i) diff --git a/common/persistence/persistence-tests/executionManagerTest.go b/common/persistence/persistence-tests/executionManagerTest.go index d2bb458f95a..c67463db353 100644 --- a/common/persistence/persistence-tests/executionManagerTest.go +++ b/common/persistence/persistence-tests/executionManagerTest.go @@ -2429,6 +2429,7 @@ func (s *ExecutionManagerSuite) TestWorkflowMutableStateActivities() { NonRetriableErrors: []string{"accessDenied", "badRequest"}, LastFailureReason: "some random error", LastWorkerIdentity: uuid.New(), + LastFailureDetails: []byte(uuid.New()), }} err2 := s.UpdateWorkflowExecution(updatedInfo, updatedStats, nil, []int64{int64(4)}, nil, int64(3), nil, activityInfos, nil, nil, nil) s.NoError(err2) @@ -2473,6 +2474,7 @@ func (s *ExecutionManagerSuite) TestWorkflowMutableStateActivities() { s.Equal(activityInfos[0].NonRetriableErrors, ai.NonRetriableErrors) s.Equal(activityInfos[0].LastFailureReason, ai.LastFailureReason) s.Equal(activityInfos[0].LastWorkerIdentity, ai.LastWorkerIdentity) + s.Equal(activityInfos[0].LastFailureDetails, ai.LastFailureDetails) err2 = s.UpdateWorkflowExecution(updatedInfo, updatedStats, nil, nil, nil, int64(5), nil, nil, []int64{1}, nil, nil) s.NoError(err2) diff --git a/common/persistence/persistence-tests/shared_test.go b/common/persistence/persistence-tests/shared_test.go new 
file mode 100644 index 00000000000..d138e519220 --- /dev/null +++ b/common/persistence/persistence-tests/shared_test.go @@ -0,0 +1,50 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package persistencetests + +import ( + "github.com/uber/cadence/common/persistence" + "testing" +) + +func TestGarbageCleanupInfo(t *testing.T) { + domainID := "10000000-5000-f000-f000-000000000000" + workflowID := "workflow-id" + runID := "10000000-5000-f000-f000-000000000002" + + info := persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, runID) + domainID2, workflowID2, runID2, err := persistence.SplitHistoryGarbageCleanupInfo(info) + if err != nil || domainID != domainID2 || workflowID != workflowID2 || runID != runID2 { + t.Fail() + } +} + +func TestGarbageCleanupInfo_WithColonInWorkflowID(t *testing.T) { + domainID := "10000000-5000-f000-f000-000000000000" + workflowID := "workflow-id:2" + runID := "10000000-5000-f000-f000-000000000002" + + info := persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, runID) + domainID2, workflowID2, runID2, err := persistence.SplitHistoryGarbageCleanupInfo(info) + if err != nil || domainID != domainID2 || workflowID != workflowID2 || runID != runID2 { + t.Fail() + } +} diff --git a/common/persistence/persistenceInterface.go b/common/persistence/persistenceInterface.go index 7a86a34b61e..eff9c29f0b6 100644 --- a/common/persistence/persistenceInterface.go +++ b/common/persistence/persistenceInterface.go @@ -281,6 +281,7 @@ type ( NonRetriableErrors []string LastFailureReason string LastWorkerIdentity string + LastFailureDetails []byte // Not written to database - This is used only for deduping heartbeat timer creation LastHeartbeatTimeoutVisibility int64 } diff --git a/common/persistence/sql/workflowStateMaps.go b/common/persistence/sql/workflowStateMaps.go index 74c8fceef7a..1e99b3b4d8b 100644 --- a/common/persistence/sql/workflowStateMaps.go +++ b/common/persistence/sql/workflowStateMaps.go @@ -79,6 +79,7 @@ func updateActivityInfos( RetryNonRetryableErrors: v.NonRetriableErrors, RetryLastFailureReason: &v.LastFailureReason, RetryLastWorkerIdentity: &v.LastWorkerIdentity, + RetryLastFailureDetails:
v.LastFailureDetails, } blob, err := activityInfoToBlob(info) if err != nil { @@ -193,6 +194,7 @@ func getActivityInfoMap( NonRetriableErrors: decoded.GetRetryNonRetryableErrors(), LastFailureReason: decoded.GetRetryLastFailureReason(), LastWorkerIdentity: decoded.GetRetryLastWorkerIdentity(), + LastFailureDetails: decoded.GetRetryLastFailureDetails(), } if decoded.StartedEvent != nil { info.StartedEvent = persistence.NewDataBlob(decoded.StartedEvent, common.EncodingType(decoded.GetStartedEventEncoding())) diff --git a/common/service/dynamicconfig/constants.go b/common/service/dynamicconfig/constants.go index 8db0ac2a7e5..8a102b2d400 100644 --- a/common/service/dynamicconfig/constants.go +++ b/common/service/dynamicconfig/constants.go @@ -66,6 +66,7 @@ var keys = map[Key]string{ MinRetentionDays: "system.minRetentionDays", MaxDecisionStartToCloseSeconds: "system.maxDecisionStartToCloseSeconds", EnableBatcher: "worker.enableBatcher", + EnableParentClosePolicyWorker: "system.enableParentClosePolicyWorker", // size limit BlobSizeLimitError: "limit.blobSize.error", @@ -186,6 +187,8 @@ var keys = map[Key]string{ HistoryThrottledLogRPS: "history.throttledLogRPS", StickyTTL: "history.stickyTTL", DecisionHeartbeatTimeout: "history.decisionHeartbeatTimeout", + ParentClosePolicyThreshold: "history.parentClosePolicyThreshold", + NumParentClosePolicySystemWorkflows: "history.numParentClosePolicySystemWorkflows", WorkerPersistenceMaxQPS: "worker.persistenceMaxQPS", WorkerReplicatorMetaTaskConcurrency: "worker.replicatorMetaTaskConcurrency", @@ -492,6 +495,11 @@ const ( EnableEventsV2 // UseTerminateAsDefaultParentClosePolicy whether to use Terminate as default ParentClosePolicy, otherwise use Abandon for backward compatibility UseTerminateAsDefaultParentClosePolicy + // ParentClosePolicyThreshold decides that parent close policy will be processed by sys workers(if enabled) if + // the number of children greater than or equal to this threshold + ParentClosePolicyThreshold + // 
NumParentClosePolicySystemWorkflows is key for number of parentClosePolicy system workflows running in total + NumParentClosePolicySystemWorkflows // HistoryThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger HistoryThrottledLogRPS @@ -550,6 +558,8 @@ const ( ScannerPersistenceMaxQPS // EnableBatcher decides whether start batcher in our worker EnableBatcher + // EnableParentClosePolicyWorker decides whether or not enable system workers for processing parent close policy task + EnableParentClosePolicyWorker // lastKeyForTest must be the last one in this const group for testing purpose lastKeyForTest diff --git a/docker/config_template.yaml b/docker/config_template.yaml index 9226db344c4..4b2d315afe1 100644 --- a/docker/config_template.yaml +++ b/docker/config_template.yaml @@ -130,15 +130,24 @@ dcRedirectionPolicy: toDC: "" archival: + history: status: "enabled" - enableReadFromArchival: true - defaultBucket: "cadence-development" - filestore: - storeDirectory: "/tmp/development/blobstore/" - defaultBucket: "cadence-development" - customBuckets: - - "custom-bucket-1" - - "custom-bucket-2" + enableRead: true + provider: + filestore: + fileMode: "0666" + dirMode: "0766" + visibility: + status: "disabled" + enableRead: false + +domainDefaults: + archival: + history: + status: "enabled" + URI: "file:///tmp/cadence_archival/development" + visibility: + status: "disabled" kafka: tls: diff --git a/go.mod b/go.mod index cf544e5fc79..1a11b31f1a0 100644 --- a/go.mod +++ b/go.mod @@ -69,8 +69,8 @@ require ( github.com/uber-go/kafka-client v0.2.2 github.com/uber-go/mapdecode v1.0.0 // indirect github.com/uber-go/tally v3.3.11+incompatible - github.com/uber/jaeger-client-go v2.16.0+incompatible // indirect - github.com/uber/jaeger-lib v2.0.0+incompatible // indirect + github.com/uber/jaeger-client-go v2.15.0+incompatible // indirect + github.com/uber/jaeger-lib v1.5.0 // indirect github.com/uber/ringpop-go v0.8.5 
github.com/uber/tchannel-go v1.14.0 github.com/urfave/cli v1.20.0 diff --git a/go.sum b/go.sum index 3a9e006872f..1be74769e2a 100644 --- a/go.sum +++ b/go.sum @@ -223,10 +223,10 @@ github.com/uber-go/mapdecode v1.0.0 h1:euUEFM9KnuCa1OBixz1xM+FIXmpixyay5DLymceOV github.com/uber-go/mapdecode v1.0.0/go.mod h1:b5nP15FwXTgpjTjeA9A2uTHXV5UJCl4arwKpP0FP1Hw= github.com/uber-go/tally v3.3.11+incompatible h1:b6xn/zbXCPFID3p2P9nUlHWyrNZ3e3U35Ra1/gDR63I= github.com/uber-go/tally v3.3.11+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= -github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= -github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= +github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/ringpop-go v0.8.5 h1:aBa/SHmmFRcAXA63k7uBheoTL8tCmH7L+OgktB1AF/o= github.com/uber/ringpop-go v0.8.5/go.mod h1:zVI6eGO6L7pG14GkntHsSOfmUAWQ7B4lvmzly4IT4ls= github.com/uber/tchannel-go v1.14.0 h1:v5mYnfCSI+H76umzo17+o3YdrnUt5W1AcvV+47065B0= @@ -241,8 +241,6 @@ github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryB github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/cadence 
v0.0.0-20190702231331-27b0ba2bc456 h1:4nI6vqmnHOgQyeDDepZghHE4YYQuxXFnJvPZA8JKGjs= -go.uber.org/cadence v0.0.0-20190702231331-27b0ba2bc456/go.mod h1:CQivfHCJ44B1kKL4LLtOhcepUwNoRodZceo/wU5Nthw= go.uber.org/cadence v0.9.1-0.20190819173509-c3cd9f8f9745 h1:UfkGg9w9lbfmLNxHMFp5UlCqzqw/kAZglCSAE2v6aeY= go.uber.org/cadence v0.9.1-0.20190819173509-c3cd9f8f9745/go.mod h1:CQivfHCJ44B1kKL4LLtOhcepUwNoRodZceo/wU5Nthw= go.uber.org/dig v1.7.0 h1:E5/L92iQTNJTjfgJF2KgU+/JpMaiuvK2DHLBj0+kSZk= diff --git a/host/activity_test.go b/host/activity_test.go index e4e01a54c75..ed5b98fdc94 100644 --- a/host/activity_test.go +++ b/host/activity_test.go @@ -307,8 +307,11 @@ func (s *integrationSuite) TestActivityHeartbeatDetailsDuringRetry() { s.Equal(workflow.PendingActivityStateScheduled, pendingActivity.GetState()) if i == 0 { s.Equal("cadenceInternal:Timeout HEARTBEAT", pendingActivity.GetLastFailureReason()) + s.Nil(pendingActivity.GetLastFailureDetails()) } else { // i == 1 - s.Equal("retryable-error", pendingActivity.GetLastFailureReason()) + expectedErrString := "retryable-error" + s.Equal(expectedErrString, pendingActivity.GetLastFailureReason()) + s.Equal([]byte(expectedErrString), pendingActivity.GetLastFailureDetails()) } s.Equal(identity, pendingActivity.GetLastWorkerIdentity()) @@ -504,7 +507,9 @@ func (s *integrationSuite) TestActivityRetry() { s.Nil(err) for _, pendingActivity := range descResp.GetPendingActivities() { if pendingActivity.GetActivityID() == "A" { - s.Equal("bad-luck-please-retry", pendingActivity.GetLastFailureReason()) + expectedErrString := "bad-luck-please-retry" + s.Equal(expectedErrString, pendingActivity.GetLastFailureReason()) + s.Equal([]byte(expectedErrString), pendingActivity.GetLastFailureDetails()) s.Equal(identity, pendingActivity.GetLastWorkerIdentity()) } } @@ -516,7 +521,9 @@ func (s *integrationSuite) TestActivityRetry() { s.Nil(err) for _, pendingActivity := range descResp.GetPendingActivities() { if pendingActivity.GetActivityID() == "A" { - 
s.Equal("bad-bug", pendingActivity.GetLastFailureReason()) + expectedErrString := "bad-bug" + s.Equal(expectedErrString, pendingActivity.GetLastFailureReason()) + s.Equal([]byte(expectedErrString), pendingActivity.GetLastFailureDetails()) s.Equal(identity2, pendingActivity.GetLastWorkerIdentity()) } } diff --git a/host/archival_test.go b/host/archival_test.go index 341c6c2a684..7f926477325 100644 --- a/host/archival_test.go +++ b/host/archival_test.go @@ -23,6 +23,7 @@ package host import ( "bytes" "encoding/binary" + "fmt" "strconv" "time" @@ -34,7 +35,7 @@ import ( ) const ( - retryLimit = 10 + retryLimit = 20 retryBackoffTime = 200 * time.Millisecond ) @@ -99,50 +100,50 @@ func (s *integrationSuite) TestArchival_ArchiverWorker() { s.True(s.isMutableStateDeleted(domainID, execution)) } -// TODO: uncomment this test after queryParser for sql is implemented -// func (s *integrationSuite) TestVisibilityArchival() { -// s.True(s.testCluster.archiverBase.metadata.GetVisibilityConfig().ClusterConfiguredForArchival()) - -// domainID := s.getDomainID(s.archivalDomainName) -// workflowID := "archival-visibility-workflow-id" -// workflowType := "archival-visibility-workflow-type" -// taskList := "archival-visibility-task-list" -// numActivities := 3 -// numRuns := 5 -// startTime := time.Now().UnixNano() -// s.startAndFinishWorkflow(workflowID, workflowType, taskList, s.archivalDomainName, domainID, numActivities, numRuns) -// s.startAndFinishWorkflow("some other workflowID", "some other workflow type", taskList, s.archivalDomainName, domainID, numActivities, numRuns) -// endTime := time.Now().UnixNano() - -// var executions []*workflow.WorkflowExecutionInfo -// request := &workflow.ListArchivedWorkflowExecutionsRequest{ -// Domain: common.StringPtr(s.archivalDomainName), -// MaximumPageSize: common.Int32Ptr(2), -// CloseTimeFilter: &workflow.StartTimeFilter{ -// EarliestTime: common.Int64Ptr(startTime), -// LatestTime: common.Int64Ptr(endTime), -// }, -// TypeFilter: 
&workflow.WorkflowTypeFilter{ -// Name: common.StringPtr(workflowType), -// }, -// } -// for len(executions) == 0 || request.NextPageToken != nil { -// response, err := s.engine.ListArchivedWorkflowExecutions(createContext(), request) -// s.NoError(err) -// s.NotNil(response) -// executions = append(executions, response.GetExecutions()...) -// request.NextPageToken = response.NextPageToken -// } - -// s.Len(executions, numRuns) -// for _, execution := range executions { -// s.Equal(workflowID, execution.GetExecution().GetWorkflowId()) -// s.Equal(workflowType, execution.GetType().GetName()) -// s.NotZero(execution.StartTime) -// s.NotZero(execution.ExecutionTime) -// s.NotZero(execution.CloseTime) -// } -// } +func (s *integrationSuite) TestVisibilityArchival() { + s.True(s.testCluster.archiverBase.metadata.GetVisibilityConfig().ClusterConfiguredForArchival()) + + domainID := s.getDomainID(s.archivalDomainName) + workflowID := "archival-visibility-workflow-id" + workflowType := "archival-visibility-workflow-type" + taskList := "archival-visibility-task-list" + numActivities := 3 + numRuns := 5 + startTime := time.Now().UnixNano() + s.startAndFinishWorkflow(workflowID, workflowType, taskList, s.archivalDomainName, domainID, numActivities, numRuns) + s.startAndFinishWorkflow("some other workflowID", "some other workflow type", taskList, s.archivalDomainName, domainID, numActivities, numRuns) + endTime := time.Now().UnixNano() + + var executions []*workflow.WorkflowExecutionInfo + + for i := 0; i != retryLimit; i++ { + executions = []*workflow.WorkflowExecutionInfo{} + request := &workflow.ListArchivedWorkflowExecutionsRequest{ + Domain: common.StringPtr(s.archivalDomainName), + PageSize: common.Int32Ptr(2), + Query: common.StringPtr(fmt.Sprintf("CloseTime >= %v and CloseTime <= %v and WorkflowType = '%s'", startTime, endTime, workflowType)), + } + for len(executions) == 0 || request.NextPageToken != nil { + response, err := 
s.engine.ListArchivedWorkflowExecutions(createContext(), request) + s.NoError(err) + s.NotNil(response) + executions = append(executions, response.GetExecutions()...) + request.NextPageToken = response.NextPageToken + } + if len(executions) == numRuns { + break + } + time.Sleep(retryBackoffTime) + } + + for _, execution := range executions { + s.Equal(workflowID, execution.GetExecution().GetWorkflowId()) + s.Equal(workflowType, execution.GetType().GetName()) + s.NotZero(execution.StartTime) + s.NotZero(execution.ExecutionTime) + s.NotZero(execution.CloseTime) + } +} func (s *integrationSuite) getDomainID(domain string) string { domainResp, err := s.engine.DescribeDomain(createContext(), &workflow.DescribeDomainRequest{ diff --git a/host/taskpoller.go b/host/taskpoller.go index 1f41b3d481c..84b224a50f4 100644 --- a/host/taskpoller.go +++ b/host/taskpoller.go @@ -364,6 +364,7 @@ retry: return p.Engine.RespondActivityTaskFailed(createContext(), &workflow.RespondActivityTaskFailedRequest{ TaskToken: response.TaskToken, Reason: common.StringPtr(err2.Error()), + Details: []byte(err2.Error()), Identity: common.StringPtr(p.Identity), }) } @@ -434,6 +435,7 @@ retry: RunID: common.StringPtr(response.WorkflowExecution.GetRunId()), ActivityID: common.StringPtr(response.GetActivityId()), Reason: common.StringPtr(err2.Error()), + Details: []byte(err2.Error()), Identity: common.StringPtr(p.Identity), }) } diff --git a/idl/github.com/uber/cadence/history.thrift b/idl/github.com/uber/cadence/history.thrift index 4fbef99441c..1fb86f1a252 100644 --- a/idl/github.com/uber/cadence/history.thrift +++ b/idl/github.com/uber/cadence/history.thrift @@ -330,6 +330,7 @@ struct SyncActivityRequest { 110: optional i32 attempt 120: optional string lastFailureReason 130: optional string lastWorkerIdentity + 140: optional binary lastFailureDetails } struct QueryWorkflowRequest { diff --git a/idl/github.com/uber/cadence/replicator.thrift b/idl/github.com/uber/cadence/replicator.thrift index 
2a98930428a..47b2015f4b2 100644 --- a/idl/github.com/uber/cadence/replicator.thrift +++ b/idl/github.com/uber/cadence/replicator.thrift @@ -92,6 +92,7 @@ struct SyncActicvityTaskAttributes { 110: optional i32 attempt 120: optional string lastFailureReason 130: optional string lastWorkerIdentity + 140: optional binary lastFailureDetails } struct HistoryTaskV2Attributes { diff --git a/idl/github.com/uber/cadence/shared.thrift b/idl/github.com/uber/cadence/shared.thrift index 2c93f3e7a92..0befcf5aacd 100644 --- a/idl/github.com/uber/cadence/shared.thrift +++ b/idl/github.com/uber/cadence/shared.thrift @@ -1412,6 +1412,7 @@ struct PendingActivityInfo { 100: optional i64 (js.type = "Long") expirationTimestamp 110: optional string lastFailureReason 120: optional string lastWorkerIdentity + 130: optional binary lastFailureDetails } struct PendingChildExecutionInfo { diff --git a/idl/github.com/uber/cadence/sqlblobs.thrift b/idl/github.com/uber/cadence/sqlblobs.thrift index 01e91fd1f13..ccd65d8122b 100644 --- a/idl/github.com/uber/cadence/sqlblobs.thrift +++ b/idl/github.com/uber/cadence/sqlblobs.thrift @@ -164,6 +164,7 @@ struct ActivityInfo { 64: optional list retryNonRetryableErrors 66: optional string retryLastFailureReason 68: optional string retryLastWorkerIdentity + 70: optional binary retryLastFailureDetails } struct ChildExecutionInfo { diff --git a/schema/cassandra/cadence/schema.cql b/schema/cassandra/cadence/schema.cql index 7fe0513135b..3be2b0ab999 100644 --- a/schema/cassandra/cadence/schema.cql +++ b/schema/cassandra/cadence/schema.cql @@ -178,6 +178,7 @@ CREATE TYPE activity_info ( non_retriable_errors list, last_failure_reason text, last_worker_identity text, -- Worker that returns the last failure reason + last_failure_details blob, event_data_encoding text, -- Protocol used for history serialization ); diff --git a/schema/cassandra/cadence/versioned/v0.22/activity_last_failure_details.cql 
b/schema/cassandra/cadence/versioned/v0.22/activity_last_failure_details.cql new file mode 100644 index 00000000000..56b052b5001 --- /dev/null +++ b/schema/cassandra/cadence/versioned/v0.22/activity_last_failure_details.cql @@ -0,0 +1 @@ +ALTER TYPE activity_info ADD last_failure_details blob; \ No newline at end of file diff --git a/schema/cassandra/cadence/versioned/v0.22/manifest.json b/schema/cassandra/cadence/versioned/v0.22/manifest.json index 38cf69e626f..426693494a0 100644 --- a/schema/cassandra/cadence/versioned/v0.22/manifest.json +++ b/schema/cassandra/cadence/versioned/v0.22/manifest.json @@ -1,10 +1,11 @@ { "CurrVersion": "0.22", "MinCompatibleVersion": "0.22", - "Description": "Add per cluster replication level (last replicated task_id) to shard info", + "Description": "Add per cluster replication level (last replicated task_id) to shard info and last failure details to activity info", "SchemaUpdateCqlFiles": [ "request_cancel_signal_batch_event_id.cql", "cluster_replication_level.cql", - "parent_close_policy.cql" + "parent_close_policy.cql", + "activity_last_failure_details.cql" ] } \ No newline at end of file diff --git a/schema/elasticsearch/visibility/index_template.json b/schema/elasticsearch/visibility/index_template.json index c534843003a..67cfdc4e288 100644 --- a/schema/elasticsearch/visibility/index_template.json +++ b/schema/elasticsearch/visibility/index_template.json @@ -56,7 +56,10 @@ "environment": { "type": "keyword"}, "addon": { "type": "keyword"}, "addon-type": { "type": "keyword"}, - "user": { "type": "keyword"} + "user": { "type": "keyword"}, + "CustomDomain": { "type": "keyword"}, + "Operator": { "type": "keyword"}, + "RolloutID": { "type": "keyword"} } } } diff --git a/service/frontend/workflowHandler.go b/service/frontend/workflowHandler.go index b63efe19d8a..6724412d857 100644 --- a/service/frontend/workflowHandler.go +++ b/service/frontend/workflowHandler.go @@ -358,10 +358,6 @@ func (wh *WorkflowHandler) PollForActivityTask( 
return nil, wh.error(errRequestNotSet, scope) } - if ok := wh.allow(pollRequest); !ok { - return nil, wh.error(createServiceBusyError(), scope) - } - wh.Service.GetLogger().Debug("Received PollForActivityTask") if err := common.ValidateLongPollContextTimeout( ctx, @@ -442,10 +438,6 @@ func (wh *WorkflowHandler) PollForDecisionTask( return nil, wh.error(errRequestNotSet, scope) } - if ok := wh.allow(pollRequest); !ok { - return nil, wh.error(createServiceBusyError(), scope) - } - wh.Service.GetLogger().Debug("Received PollForDecisionTask") if err := common.ValidateLongPollContextTimeout( ctx, diff --git a/service/history/MockMutableState.go b/service/history/MockMutableState.go index 6f8a13623d1..3eafbc45d80 100644 --- a/service/history/MockMutableState.go +++ b/service/history/MockMutableState.go @@ -1175,20 +1175,20 @@ func (_m *mockMutableState) CopyToPersistence() *persistence.WorkflowMutableStat return r0 } -// CreateActivityRetryTimer provides a mock function with given fields: _a0, _a1 -func (_m *mockMutableState) RetryActivity(_a0 *persistence.ActivityInfo, _a1 string) (bool, error) { - ret := _m.Called(_a0, _a1) +// RetryActivity provides a mock function with given fields: ai, failureReason, failureDetails +func (_m *mockMutableState) RetryActivity(ai *persistence.ActivityInfo, failureReason string, failureDetails []byte) (bool, error) { + ret := _m.Called(ai, failureReason, failureDetails) var r0 bool - if rf, ok := ret.Get(0).(func(*persistence.ActivityInfo, string) bool); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(*persistence.ActivityInfo, string, []byte) bool); ok { + r0 = rf(ai, failureReason, failureDetails) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(*persistence.ActivityInfo, string) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(*persistence.ActivityInfo, string, []byte) error); ok { + r1 = rf(ai, failureReason, failureDetails) } else { r1 = ret.Error(1) } @@ -1526,6 +1526,20 @@ 
func (_m *mockMutableState) GetCurrentVersion() int64 { return r0 } +// GetDomainName provides a mock function with given fields: +func (_m *mockMutableState) GetDomainName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + // GetEventStoreVersion provides a mock function with given fields: func (_m *mockMutableState) GetEventStoreVersion() int32 { ret := _m.Called() @@ -2472,13 +2486,13 @@ func (_m *mockMutableState) ReplicateRequestCancelExternalWorkflowExecutionFaile return r0 } -// ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent provides a mock function with given fields: _a0, _a1 -func (_m *mockMutableState) ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(_a0 *shared.HistoryEvent, _a1 string) (*persistence.RequestCancelInfo, error) { - ret := _m.Called(_a0, _a1) +// ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockMutableState) ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(_a0 int64, _a1 *shared.HistoryEvent, _a2 string) (*persistence.RequestCancelInfo, error) { + ret := _m.Called(_a0, _a1, _a2) var r0 *persistence.RequestCancelInfo - if rf, ok := ret.Get(0).(func(*shared.HistoryEvent, string) *persistence.RequestCancelInfo); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(int64, *shared.HistoryEvent, string) *persistence.RequestCancelInfo); ok { + r0 = rf(_a0, _a1, _a2) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*persistence.RequestCancelInfo) @@ -2486,8 +2500,8 @@ func (_m *mockMutableState) ReplicateRequestCancelExternalWorkflowExecutionIniti } var r1 error - if rf, ok := ret.Get(1).(func(*shared.HistoryEvent, string) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(int64, *shared.HistoryEvent, string) error); ok { + r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) } @@ -2509,13 
+2523,13 @@ func (_m *mockMutableState) ReplicateSignalExternalWorkflowExecutionFailedEvent( return r0 } -// ReplicateSignalExternalWorkflowExecutionInitiatedEvent provides a mock function with given fields: _a0, _a1 -func (_m *mockMutableState) ReplicateSignalExternalWorkflowExecutionInitiatedEvent(_a0 *shared.HistoryEvent, _a1 string) (*persistence.SignalInfo, error) { - ret := _m.Called(_a0, _a1) +// ReplicateSignalExternalWorkflowExecutionInitiatedEvent provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockMutableState) ReplicateSignalExternalWorkflowExecutionInitiatedEvent(_a0 int64, _a1 *shared.HistoryEvent, _a2 string) (*persistence.SignalInfo, error) { + ret := _m.Called(_a0, _a1, _a2) var r0 *persistence.SignalInfo - if rf, ok := ret.Get(0).(func(*shared.HistoryEvent, string) *persistence.SignalInfo); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(int64, *shared.HistoryEvent, string) *persistence.SignalInfo); ok { + r0 = rf(_a0, _a1, _a2) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*persistence.SignalInfo) @@ -2523,8 +2537,8 @@ func (_m *mockMutableState) ReplicateSignalExternalWorkflowExecutionInitiatedEve } var r1 error - if rf, ok := ret.Get(1).(func(*shared.HistoryEvent, string) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(int64, *shared.HistoryEvent, string) error); ok { + r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) } diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index 7a1e9243d91..e066aac1af0 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -85,6 +85,7 @@ type ( archivalClient warchiver.Client resetor workflowResetor replicationTaskProcessors []*ReplicationTaskProcessor + publicClient workflowserviceclient.Interface } ) @@ -176,6 +177,7 @@ func NewEngineWithShardContext( shard.GetConfig().ArchiveRequestRPS, shard.GetService().GetArchiverProvider(), ), + publicClient: publicClient, } historyEngImpl.txProcessor = 
newTransferQueueProcessor(shard, historyEngImpl, visibilityMgr, matching, historyClient, logger) @@ -962,6 +964,7 @@ func (e *historyEngineImpl) DescribeWorkflowExecution( } if ai.LastFailureReason != "" { p.LastFailureReason = common.StringPtr(ai.LastFailureReason) + p.LastFailureDetails = ai.LastFailureDetails } if ai.LastWorkerIdentity != "" { p.LastWorkerIdentity = common.StringPtr(ai.LastWorkerIdentity) @@ -1219,7 +1222,7 @@ func (e *historyEngineImpl) RespondActivityTaskFailed( } postActions := &updateWorkflowAction{} - ok, err := msBuilder.RetryActivity(ai, req.FailedRequest.GetReason()) + ok, err := msBuilder.RetryActivity(ai, req.FailedRequest.GetReason(), req.FailedRequest.GetDetails()) if err != nil { return nil, err } diff --git a/service/history/historyEngine_test.go b/service/history/historyEngine_test.go index 7371832b099..d056ad7d014 100644 --- a/service/history/historyEngine_test.go +++ b/service/history/historyEngine_test.go @@ -5653,6 +5653,7 @@ func copyActivityInfo(sourceInfo *persistence.ActivityInfo) *persistence.Activit NonRetriableErrors: sourceInfo.NonRetriableErrors, LastFailureReason: sourceInfo.LastFailureReason, LastWorkerIdentity: sourceInfo.LastWorkerIdentity, + LastFailureDetails: sourceInfo.LastFailureDetails, //// Not written to database - This is used only for deduping heartbeat timer creation // LastHeartbeatTimeoutVisibility: sourceInfo.LastHeartbeatTimeoutVisibility, } diff --git a/service/history/mutableState.go b/service/history/mutableState.go index 8e58334660c..fafb91e98a6 100644 --- a/service/history/mutableState.go +++ b/service/history/mutableState.go @@ -101,7 +101,7 @@ type ( ClearStickyness() CheckResettable() error CopyToPersistence() *persistence.WorkflowMutableState - RetryActivity(*persistence.ActivityInfo, string) (bool, error) + RetryActivity(ai *persistence.ActivityInfo, failureReason string, failureDetails []byte) (bool, error) CreateNewHistoryEvent(eventType workflow.EventType) *workflow.HistoryEvent 
CreateNewHistoryEventWithTimestamp(eventType workflow.EventType, timestamp int64) *workflow.HistoryEvent CreateTransientDecisionEvents(di *decisionInfo, identity string) (*workflow.HistoryEvent, *workflow.HistoryEvent) @@ -121,6 +121,7 @@ type ( GetChildExecutionInitiatedEvent(int64) (*workflow.HistoryEvent, error) GetCompletionEvent() (*workflow.HistoryEvent, error) GetDecisionInfo(int64) (*decisionInfo, bool) + GetDomainName() string GetStartEvent() (*workflow.HistoryEvent, error) GetCurrentBranchToken() ([]byte, error) GetVersionHistories() *persistence.VersionHistories @@ -182,9 +183,9 @@ type ( ReplicateExternalWorkflowExecutionCancelRequested(*workflow.HistoryEvent) error ReplicateExternalWorkflowExecutionSignaled(*workflow.HistoryEvent) error ReplicateRequestCancelExternalWorkflowExecutionFailedEvent(*workflow.HistoryEvent) error - ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(*workflow.HistoryEvent, string) (*persistence.RequestCancelInfo, error) + ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(int64, *workflow.HistoryEvent, string) (*persistence.RequestCancelInfo, error) ReplicateSignalExternalWorkflowExecutionFailedEvent(*workflow.HistoryEvent) error - ReplicateSignalExternalWorkflowExecutionInitiatedEvent(*workflow.HistoryEvent, string) (*persistence.SignalInfo, error) + ReplicateSignalExternalWorkflowExecutionInitiatedEvent(int64, *workflow.HistoryEvent, string) (*persistence.SignalInfo, error) ReplicateStartChildWorkflowExecutionFailedEvent(*workflow.HistoryEvent) error ReplicateStartChildWorkflowExecutionInitiatedEvent(int64, *workflow.HistoryEvent, string) (*persistence.ChildExecutionInfo, error) ReplicateTimerCanceledEvent(*workflow.HistoryEvent) error diff --git a/service/history/mutableStateBuilder.go b/service/history/mutableStateBuilder.go index 2a00f026f7c..a66c1bc7d76 100644 --- a/service/history/mutableStateBuilder.go +++ b/service/history/mutableStateBuilder.go @@ -22,11 +22,9 @@ package history import ( "fmt" 
- "math" "time" "github.com/pborman/uuid" - h "github.com/uber/cadence/.gen/go/history" workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" @@ -120,7 +118,8 @@ type ( insertReplicationTasks []persistence.Task insertTimerTasks []persistence.Task - taskGenerator mutableStateTaskGenerator + taskGenerator mutableStateTaskGenerator + decisionTaskManager mutableStateDecisionTaskManager shard ShardContext clusterMetadata cluster.Metadata @@ -193,12 +192,8 @@ func newMutableStateBuilder( LastProcessedEvent: common.EmptyEventID, } s.hBuilder = newHistoryBuilder(s, logger) - - s.taskGenerator = newMutableStateTaskGenerator( - shard.GetDomainCache(), - s.logger, - s, - ) + s.taskGenerator = newMutableStateTaskGenerator(shard.GetDomainCache(), s.logger, s) + s.decisionTaskManager = newMutableStateDecisionTaskManager(s) return s } @@ -770,6 +765,10 @@ func (e *mutableStateBuilder) IsCurrentWorkflowGuaranteed() bool { } } +func (e *mutableStateBuilder) GetDomainName() string { + return e.domainName +} + func (e *mutableStateBuilder) IsStickyTaskListEnabled() bool { if e.executionInfo.StickyTaskList == "" { return false @@ -1193,6 +1192,7 @@ func (e *mutableStateBuilder) ReplicateActivityInfo( ai.Attempt = request.GetAttempt() ai.LastFailureReason = request.GetLastFailureReason() ai.LastWorkerIdentity = request.GetLastWorkerIdentity() + ai.LastFailureDetails = request.GetLastFailureDetails() if resetActivityTimerTaskStatus { ai.TimerTaskStatus = TimerTaskStatusNone @@ -1292,12 +1292,7 @@ func (e *mutableStateBuilder) getDecisionInfo() *decisionInfo { func (e *mutableStateBuilder) GetDecisionInfo( scheduleEventID int64, ) (*decisionInfo, bool) { - - decision := e.getDecisionInfo() - if scheduleEventID == decision.ScheduleID { - return decision, true - } - return nil, false + return e.decisionTaskManager.GetDecisionInfo(scheduleEventID) } func (e *mutableStateBuilder) GetPendingActivityInfos() map[int64]*persistence.ActivityInfo { @@ -1321,34 +1316,23 
@@ func (e *mutableStateBuilder) GetPendingSignalExternalInfos() map[int64]*persist } func (e *mutableStateBuilder) HasProcessedOrPendingDecision() bool { - return e.HasPendingDecision() || e.GetPreviousStartedEventID() != common.EmptyEventID + return e.decisionTaskManager.HasProcessedOrPendingDecision() } func (e *mutableStateBuilder) HasPendingDecision() bool { - return e.executionInfo.DecisionScheduleID != common.EmptyEventID + return e.decisionTaskManager.HasPendingDecision() } func (e *mutableStateBuilder) GetPendingDecision() (*decisionInfo, bool) { - if e.executionInfo.DecisionScheduleID == common.EmptyEventID { - return nil, false - } - - decision := e.getDecisionInfo() - return decision, true + return e.decisionTaskManager.GetPendingDecision() } func (e *mutableStateBuilder) HasInFlightDecision() bool { - return e.executionInfo.DecisionStartedID > 0 + return e.decisionTaskManager.HasInFlightDecision() } func (e *mutableStateBuilder) GetInFlightDecision() (*decisionInfo, bool) { - if e.executionInfo.DecisionScheduleID == common.EmptyEventID || - e.executionInfo.DecisionStartedID == common.EmptyEventID { - return nil, false - } - - decision := e.getDecisionInfo() - return decision, true + return e.decisionTaskManager.GetInFlightDecision() } func (e *mutableStateBuilder) HasBufferedEvents() bool { @@ -1369,70 +1353,18 @@ func (e *mutableStateBuilder) HasBufferedEvents() bool { func (e *mutableStateBuilder) UpdateDecision( decision *decisionInfo, ) { - - e.executionInfo.DecisionVersion = decision.Version - e.executionInfo.DecisionScheduleID = decision.ScheduleID - e.executionInfo.DecisionStartedID = decision.StartedID - e.executionInfo.DecisionRequestID = decision.RequestID - e.executionInfo.DecisionTimeout = decision.DecisionTimeout - e.executionInfo.DecisionAttempt = decision.Attempt - e.executionInfo.DecisionStartedTimestamp = decision.StartedTimestamp - e.executionInfo.DecisionScheduledTimestamp = decision.ScheduledTimestamp - 
e.executionInfo.DecisionOriginalScheduledTimestamp = decision.OriginalScheduledTimestamp - - // NOTE: do not update tasklist in execution info - - e.logger.Debug(fmt.Sprintf( - "Decision Updated: {Schedule: %v, Started: %v, ID: %v, Timeout: %v, Attempt: %v, Timestamp: %v}", - decision.ScheduleID, - decision.StartedID, - decision.RequestID, - decision.DecisionTimeout, - decision.Attempt, - decision.StartedTimestamp, - )) + e.decisionTaskManager.UpdateDecision(decision) } // DeleteDecision deletes a decision task. func (e *mutableStateBuilder) DeleteDecision() { - resetDecisionInfo := &decisionInfo{ - Version: common.EmptyVersion, - ScheduleID: common.EmptyEventID, - StartedID: common.EmptyEventID, - RequestID: emptyUUID, - DecisionTimeout: 0, - Attempt: 0, - StartedTimestamp: 0, - ScheduledTimestamp: 0, - TaskList: "", - // Keep the last original scheduled timestamp, so that AddDecisionAsHeartbeat can continue with it. - OriginalScheduledTimestamp: e.getDecisionInfo().OriginalScheduledTimestamp, - } - e.UpdateDecision(resetDecisionInfo) + e.decisionTaskManager.DeleteDecision() } func (e *mutableStateBuilder) FailDecision( incrementAttempt bool, ) { - - // Clear stickiness whenever decision fails - e.ClearStickyness() - - failDecisionInfo := &decisionInfo{ - Version: common.EmptyVersion, - ScheduleID: common.EmptyEventID, - StartedID: common.EmptyEventID, - RequestID: emptyUUID, - DecisionTimeout: 0, - StartedTimestamp: 0, - TaskList: "", - OriginalScheduledTimestamp: 0, - } - if incrementAttempt { - failDecisionInfo.Attempt = e.executionInfo.DecisionAttempt + 1 - failDecisionInfo.ScheduledTimestamp = e.timeSource.Now().UnixNano() - } - e.UpdateDecision(failDecisionInfo) + e.decisionTaskManager.FailDecision(incrementAttempt) } func (e *mutableStateBuilder) ClearStickyness() { @@ -1766,50 +1698,13 @@ func (e *mutableStateBuilder) ReplicateWorkflowExecutionStartedEvent( func (e *mutableStateBuilder) AddFirstDecisionTaskScheduled( startEvent *workflow.HistoryEvent, ) 
error { - - // handle first decision case, i.e. possible delayed decision - // - // below handles the following cases: - // 1. if not continue as new & if workflow has no parent - // -> schedule decision & schedule delayed decision - // 2. if not continue as new & if workflow has parent - // -> this function should not be called during workflow start, but should be called as - // part of schedule decision in 2 phase commit - // - // if continue as new - // 1. whether has parent workflow or not - // -> schedule decision & schedule delayed decision - // - startAttr := startEvent.WorkflowExecutionStartedEventAttributes - decisionBackoffDuration := time.Duration(startAttr.GetFirstDecisionTaskBackoffSeconds()) * time.Second - - var err error - if decisionBackoffDuration != 0 { - if err = e.taskGenerator.generateDelayedDecisionTasks( - e.unixNanoToTime(startEvent.GetTimestamp()), - startEvent, - ); err != nil { - return err - } - } else { - if _, err = e.AddDecisionTaskScheduledEvent( - false, - ); err != nil { - return err - } - } - - return nil + return e.decisionTaskManager.AddFirstDecisionTaskScheduled(startEvent) } func (e *mutableStateBuilder) AddDecisionTaskScheduledEvent( bypassTaskGeneration bool, ) (*decisionInfo, error) { - - return e.AddDecisionTaskScheduledEventAsHeartbeat( - bypassTaskGeneration, - e.timeSource.Now().UnixNano(), - ) + return e.decisionTaskManager.AddDecisionTaskScheduledEvent(bypassTaskGeneration) } // originalScheduledTimestamp is to record the first scheduled decision during decision heartbeat. 
@@ -1817,111 +1712,15 @@ func (e *mutableStateBuilder) AddDecisionTaskScheduledEventAsHeartbeat( bypassTaskGeneration bool, originalScheduledTimestamp int64, ) (*decisionInfo, error) { - opTag := tag.WorkflowActionDecisionTaskScheduled if err := e.checkMutability(opTag); err != nil { return nil, err } - - if e.HasPendingDecision() { - e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(e.executionInfo.DecisionScheduleID)) - return nil, e.createInternalServerError(opTag) - } - - // Tasklist and decision timeout should already be set from workflow execution started event - taskList := e.executionInfo.TaskList - if e.IsStickyTaskListEnabled() { - taskList = e.executionInfo.StickyTaskList - } else { - // It can be because stickyness has expired due to StickyTTL config - // In that case we need to clear stickyness so that the LastUpdateTimestamp is not corrupted. - // In other cases, clearing stickyness shouldn't hurt anything. - // TODO: https://github.com/uber/cadence/issues/2357: - // if we can use a new field(LastDecisionUpdateTimestamp), then we could get rid of it. 
- e.ClearStickyness() - } - startToCloseTimeoutSeconds := e.executionInfo.DecisionTimeoutValue - - // Flush any buffered events before creating the decision, otherwise it will result in invalid IDs for transient - // decision and will cause in timeout processing to not work for transient decisions - if e.HasBufferedEvents() { - // if creating a decision and in the mean time events are flushed from buffered events - // than this decision cannot be a transient decision - e.executionInfo.DecisionAttempt = 0 - if err := e.FlushBufferedEvents(); err != nil { - return nil, err - } - } - - var newDecisionEvent *workflow.HistoryEvent - scheduleID := e.GetNextEventID() // we will generate the schedule event later for repeatedly failing decisions - // Avoid creating new history events when decisions are continuously failing - scheduleTime := e.timeSource.Now().UnixNano() - if e.executionInfo.DecisionAttempt == 0 { - newDecisionEvent = e.hBuilder.AddDecisionTaskScheduledEvent(taskList, startToCloseTimeoutSeconds, - e.executionInfo.DecisionAttempt) - scheduleID = newDecisionEvent.GetEventId() - scheduleTime = newDecisionEvent.GetTimestamp() - } - - decision, err := e.ReplicateDecisionTaskScheduledEvent( - e.GetCurrentVersion(), - scheduleID, - taskList, - startToCloseTimeoutSeconds, - e.executionInfo.DecisionAttempt, - scheduleTime, - originalScheduledTimestamp, - ) - if err != nil { - return nil, err - } - - // TODO merge active & passive task generation - if !bypassTaskGeneration { - if err := e.taskGenerator.generateDecisionScheduleTasks( - e.unixNanoToTime(scheduleTime), // schedule time is now - scheduleID, - ); err != nil { - return nil, err - } - } - - return decision, nil + return e.decisionTaskManager.AddDecisionTaskScheduledEventAsHeartbeat(bypassTaskGeneration, originalScheduledTimestamp) } func (e *mutableStateBuilder) ReplicateTransientDecisionTaskScheduled() (*decisionInfo, error) { - if e.HasPendingDecision() || e.GetExecutionInfo().DecisionAttempt == 0 { - 
return nil, nil - } - - // the schedule ID for this decision is guaranteed to be wrong - // since the next event ID is assigned at the very end of when - // all events are applied for replication. - // this is OK - // 1. if a failover happen just after this transient decision, - // AddDecisionTaskStartedEvent will handle the correction of schedule ID - // and set the attempt to 0 - // 2. if no failover happen during the life time of this transient decision - // then ReplicateDecisionTaskScheduledEvent will overwrite everything - // including the decision schedule ID - decision := &decisionInfo{ - Version: e.GetCurrentVersion(), - ScheduleID: e.GetNextEventID(), - StartedID: common.EmptyEventID, - RequestID: emptyUUID, - DecisionTimeout: e.GetExecutionInfo().DecisionTimeoutValue, - TaskList: e.GetExecutionInfo().TaskList, - Attempt: e.GetExecutionInfo().DecisionAttempt, - ScheduledTimestamp: e.timeSource.Now().UnixNano(), - StartedTimestamp: 0, - } - - e.UpdateDecision(decision) - return decision, nil + return e.decisionTaskManager.ReplicateTransientDecisionTaskScheduled() } func (e *mutableStateBuilder) ReplicateDecisionTaskScheduledEvent( @@ -1933,32 +1732,7 @@ func (e *mutableStateBuilder) ReplicateDecisionTaskScheduledEvent( scheduleTimestamp int64, originalScheduledTimestamp int64, ) (*decisionInfo, error) { - - // set workflow state to running, since decision is scheduled - if state, _ := e.GetWorkflowStateCloseStatus(); state == persistence.WorkflowStateCreated { - if err := e.UpdateWorkflowStateCloseStatus( - persistence.WorkflowStateRunning, - persistence.WorkflowCloseStatusNone, - ); err != nil { - return nil, err - } - } - - decision := &decisionInfo{ - Version: version, - ScheduleID: scheduleID, - StartedID: common.EmptyEventID, - RequestID: emptyUUID, - DecisionTimeout: startToCloseTimeoutSeconds, - TaskList: taskList, - Attempt: attempt, - ScheduledTimestamp: scheduleTimestamp, - StartedTimestamp: 0, - OriginalScheduledTimestamp: 
originalScheduledTimestamp, - } - - e.UpdateDecision(decision) - return decision, nil + return e.decisionTaskManager.ReplicateDecisionTaskScheduledEvent(version, scheduleID, taskList, startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp) } func (e *mutableStateBuilder) AddDecisionTaskStartedEvent( @@ -1966,51 +1740,11 @@ func (e *mutableStateBuilder) AddDecisionTaskStartedEvent( requestID string, request *workflow.PollForDecisionTaskRequest, ) (*workflow.HistoryEvent, *decisionInfo, error) { - opTag := tag.WorkflowActionDecisionTaskStarted if err := e.checkMutability(opTag); err != nil { return nil, nil, err } - - decision, ok := e.GetDecisionInfo(scheduleEventID) - if !ok || decision.StartedID != common.EmptyEventID { - e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(scheduleEventID)) - return nil, nil, e.createInternalServerError(opTag) - } - - var event *workflow.HistoryEvent - scheduleID := decision.ScheduleID - startedID := scheduleID + 1 - tasklist := request.TaskList.GetName() - startTime := e.timeSource.Now().UnixNano() - // First check to see if new events came since transient decision was scheduled - if decision.Attempt > 0 && decision.ScheduleID != e.GetNextEventID() { - // Also create a new DecisionTaskScheduledEvent since new events came in when it was scheduled - scheduleEvent := e.hBuilder.AddDecisionTaskScheduledEvent(tasklist, decision.DecisionTimeout, 0) - scheduleID = scheduleEvent.GetEventId() - decision.Attempt = 0 - } - - // Avoid creating new history events when decisions are continuously failing - if decision.Attempt == 0 { - // Now create DecisionTaskStartedEvent - event = e.hBuilder.AddDecisionTaskStartedEvent(scheduleID, requestID, request.GetIdentity()) - startedID = event.GetEventId() - startTime = event.GetTimestamp() - } - - decision, err := e.ReplicateDecisionTaskStartedEvent(decision, 
e.GetCurrentVersion(), scheduleID, startedID, requestID, startTime) - // TODO merge active & passive task generation - if err := e.taskGenerator.generateDecisionStartTasks( - e.unixNanoToTime(startTime), // start time is now - scheduleID, - ); err != nil { - return nil, nil, err - } - return event, decision, err + return e.decisionTaskManager.AddDecisionTaskStartedEvent(scheduleEventID, requestID, request) } func (e *mutableStateBuilder) ReplicateDecisionTaskStartedEvent( @@ -2021,80 +1755,15 @@ func (e *mutableStateBuilder) ReplicateDecisionTaskStartedEvent( requestID string, timestamp int64, ) (*decisionInfo, error) { - // Replicator calls it with a nil decision info, and it is safe to always lookup the decision in this case as it - // does not have to deal with transient decision case. - var ok bool - if decision == nil { - decision, ok = e.GetDecisionInfo(scheduleID) - if !ok { - return nil, errors.NewInternalFailureError(fmt.Sprintf("unable to find decision: %v", scheduleID)) - } - // setting decision attempt to 0 for decision task replication - // this mainly handles transient decision completion - // for transient decision, active side will write 2 batch in a "transaction" - // 1. decision task scheduled & decision task started - // 2. decision task completed & other events - // since we need to treat each individual event batch as one transaction - // certain "magic" needs to be done, i.e. 
setting attempt to 0 so - // if first batch is replicated, but not the second one, decision can be correctly timed out - decision.Attempt = 0 - } - - // Update mutable decision state - decision = &decisionInfo{ - Version: version, - ScheduleID: scheduleID, - StartedID: startedID, - RequestID: requestID, - DecisionTimeout: decision.DecisionTimeout, - Attempt: decision.Attempt, - StartedTimestamp: timestamp, - ScheduledTimestamp: decision.ScheduledTimestamp, - TaskList: decision.TaskList, - OriginalScheduledTimestamp: decision.OriginalScheduledTimestamp, - } - - e.UpdateDecision(decision) - return decision, nil + + return e.decisionTaskManager.ReplicateDecisionTaskStartedEvent(decision, version, scheduleID, startedID, requestID, timestamp) } func (e *mutableStateBuilder) CreateTransientDecisionEvents( decision *decisionInfo, identity string, ) (*workflow.HistoryEvent, *workflow.HistoryEvent) { - - tasklist := e.executionInfo.TaskList - - scheduledEvent := newDecisionTaskScheduledEventWithInfo( - decision.ScheduleID, - decision.ScheduledTimestamp, - tasklist, - decision.DecisionTimeout, - decision.Attempt, - ) - - startedEvent := newDecisionTaskStartedEventWithInfo( - decision.StartedID, - decision.StartedTimestamp, - decision.ScheduleID, - decision.RequestID, - identity, - ) - - return scheduledEvent, startedEvent -} - -func (e *mutableStateBuilder) beforeAddDecisionTaskCompletedEvent() { - // Make sure to delete decision before adding events. 
Otherwise they are buffered rather than getting appended - e.DeleteDecision() -} - -func (e *mutableStateBuilder) afterAddDecisionTaskCompletedEvent( - event *workflow.HistoryEvent, - maxResetPoints int, -) { - e.executionInfo.LastProcessedEvent = event.GetDecisionTaskCompletedEventAttributes().GetStartedEventId() - e.addBinaryCheckSumIfNotExists(event, maxResetPoints) + return e.decisionTaskManager.CreateTransientDecisionEvents(decision, identity) } // add BinaryCheckSum for the first decisionTaskCompletedID for auto-reset @@ -2175,120 +1844,44 @@ func (e *mutableStateBuilder) AddDecisionTaskCompletedEvent( request *workflow.RespondDecisionTaskCompletedRequest, maxResetPoints int, ) (*workflow.HistoryEvent, error) { - opTag := tag.WorkflowActionDecisionTaskCompleted if err := e.checkMutability(opTag); err != nil { return nil, err } - - decision, ok := e.GetDecisionInfo(scheduleEventID) - if !ok || decision.StartedID != startedEventID { - e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(scheduleEventID), - tag.WorkflowStartedID(startedEventID)) - - return nil, e.createInternalServerError(opTag) - } - - e.beforeAddDecisionTaskCompletedEvent() - if decision.Attempt > 0 { - // Create corresponding DecisionTaskSchedule and DecisionTaskStarted events for decisions we have been retrying - scheduledEvent := e.hBuilder.AddTransientDecisionTaskScheduledEvent(e.executionInfo.TaskList, decision.DecisionTimeout, - decision.Attempt, decision.ScheduledTimestamp) - startedEvent := e.hBuilder.AddTransientDecisionTaskStartedEvent(scheduledEvent.GetEventId(), decision.RequestID, - request.GetIdentity(), decision.StartedTimestamp) - startedEventID = startedEvent.GetEventId() - } - // Now write the completed event - event := e.hBuilder.AddDecisionTaskCompletedEvent(scheduleEventID, startedEventID, request) - - e.afterAddDecisionTaskCompletedEvent(event, maxResetPoints) - 
return event, nil + return e.decisionTaskManager.AddDecisionTaskCompletedEvent(scheduleEventID, startedEventID, request, maxResetPoints) } func (e *mutableStateBuilder) ReplicateDecisionTaskCompletedEvent( event *workflow.HistoryEvent, ) error { - - e.beforeAddDecisionTaskCompletedEvent() - e.afterAddDecisionTaskCompletedEvent(event, math.MaxInt32) - return nil + return e.decisionTaskManager.ReplicateDecisionTaskCompletedEvent(event) } func (e *mutableStateBuilder) AddDecisionTaskTimedOutEvent( scheduleEventID int64, startedEventID int64, ) (*workflow.HistoryEvent, error) { - opTag := tag.WorkflowActionDecisionTaskTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } - - dt, ok := e.GetDecisionInfo(scheduleEventID) - if !ok || dt.StartedID != startedEventID { - e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(scheduleEventID), - tag.WorkflowStartedID(startedEventID)) - return nil, e.createInternalServerError(opTag) - } - - var event *workflow.HistoryEvent - // Avoid creating new history events when decisions are continuously timing out - if dt.Attempt == 0 { - event = e.hBuilder.AddDecisionTaskTimedOutEvent(scheduleEventID, startedEventID, workflow.TimeoutTypeStartToClose) - } - - if err := e.ReplicateDecisionTaskTimedOutEvent(workflow.TimeoutTypeStartToClose); err != nil { - return nil, err - } - return event, nil + return e.decisionTaskManager.AddDecisionTaskTimedOutEvent(scheduleEventID, startedEventID) } func (e *mutableStateBuilder) ReplicateDecisionTaskTimedOutEvent( timeoutType workflow.TimeoutType, ) error { - - incrementAttempt := true - // Do not increment decision attempt in the case of sticky timeout to prevent creating next decision as transient - if timeoutType == workflow.TimeoutTypeScheduleToStart { - incrementAttempt = false - } - e.FailDecision(incrementAttempt) - return nil + return 
e.decisionTaskManager.ReplicateDecisionTaskTimedOutEvent(timeoutType) } func (e *mutableStateBuilder) AddDecisionTaskScheduleToStartTimeoutEvent( scheduleEventID int64, ) (*workflow.HistoryEvent, error) { - opTag := tag.WorkflowActionDecisionTaskTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } - - if e.executionInfo.DecisionScheduleID != scheduleEventID || e.executionInfo.DecisionStartedID > 0 { - e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(scheduleEventID), - ) - return nil, e.createInternalServerError(opTag) - } - - // Clear stickiness whenever decision fails - e.ClearStickyness() - - event := e.hBuilder.AddDecisionTaskTimedOutEvent(scheduleEventID, 0, workflow.TimeoutTypeScheduleToStart) - - if err := e.ReplicateDecisionTaskTimedOutEvent(workflow.TimeoutTypeScheduleToStart); err != nil { - return nil, err - } - return event, nil + return e.decisionTaskManager.AddDecisionTaskScheduleToStartTimeoutEvent(scheduleEventID) } func (e *mutableStateBuilder) AddDecisionTaskFailedEvent( @@ -2302,54 +1895,15 @@ func (e *mutableStateBuilder) AddDecisionTaskFailedEvent( newRunID string, forkEventVersion int64, ) (*workflow.HistoryEvent, error) { - opTag := tag.WorkflowActionDecisionTaskFailed if err := e.checkMutability(opTag); err != nil { return nil, err } - - attr := workflow.DecisionTaskFailedEventAttributes{ - ScheduledEventId: common.Int64Ptr(scheduleEventID), - StartedEventId: common.Int64Ptr(startedEventID), - Cause: common.DecisionTaskFailedCausePtr(cause), - Details: details, - Identity: common.StringPtr(identity), - Reason: common.StringPtr(reason), - BaseRunId: common.StringPtr(baseRunID), - NewRunId: common.StringPtr(newRunID), - ForkEventVersion: common.Int64Ptr(forkEventVersion), - } - - dt, ok := e.GetDecisionInfo(scheduleEventID) - if !ok || dt.StartedID != startedEventID { - 
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, - tag.WorkflowEventID(e.GetNextEventID()), - tag.ErrorTypeInvalidHistoryAction, - tag.WorkflowScheduleID(scheduleEventID), - tag.WorkflowStartedID(startedEventID)) - return nil, e.createInternalServerError(opTag) - } - - var event *workflow.HistoryEvent - // Only emit DecisionTaskFailedEvent for the very first time - if dt.Attempt == 0 || cause == workflow.DecisionTaskFailedCauseResetWorkflow { - event = e.hBuilder.AddDecisionTaskFailedEvent(attr) - } - - if err := e.ReplicateDecisionTaskFailedEvent(); err != nil { - return nil, err - } - - // always clear decision attempt for reset - if cause == workflow.DecisionTaskFailedCauseResetWorkflow { - e.executionInfo.DecisionAttempt = 0 - } - return event, nil + return e.decisionTaskManager.AddDecisionTaskFailedEvent(scheduleEventID, startedEventID, cause, details, identity, reason, baseRunID, newRunID, forkEventVersion) } func (e *mutableStateBuilder) ReplicateDecisionTaskFailedEvent() error { - e.FailDecision(true) - return nil + return e.decisionTaskManager.ReplicateDecisionTaskFailedEvent() } func (e *mutableStateBuilder) AddActivityTaskScheduledEvent( @@ -2756,7 +2310,10 @@ func (e *mutableStateBuilder) AddActivityTaskCanceledEvent( return event, nil } -func (e *mutableStateBuilder) ReplicateActivityTaskCanceledEvent(event *workflow.HistoryEvent) error { +func (e *mutableStateBuilder) ReplicateActivityTaskCanceledEvent( + event *workflow.HistoryEvent, +) error { + attributes := event.ActivityTaskCanceledEventAttributes scheduleID := attributes.GetScheduledEventId() @@ -2970,7 +2527,7 @@ func (e *mutableStateBuilder) AddRequestCancelExternalWorkflowExecutionInitiated } event := e.hBuilder.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(decisionCompletedEventID, request) - rci, err := e.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(event, cancelRequestID) + rci, err := 
e.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(decisionCompletedEventID, event, cancelRequestID) if err != nil { return nil, nil, err } @@ -2985,6 +2542,7 @@ func (e *mutableStateBuilder) AddRequestCancelExternalWorkflowExecutionInitiated } func (e *mutableStateBuilder) ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent( + firstEventID int64, event *workflow.HistoryEvent, cancelRequestID string, ) (*persistence.RequestCancelInfo, error) { @@ -2992,9 +2550,10 @@ func (e *mutableStateBuilder) ReplicateRequestCancelExternalWorkflowExecutionIni // TODO: Evaluate if we need cancelRequestID also part of history event initiatedEventID := event.GetEventId() rci := &persistence.RequestCancelInfo{ - Version: event.GetVersion(), - InitiatedID: initiatedEventID, - CancelRequestID: cancelRequestID, + Version: event.GetVersion(), + InitiatedEventBatchID: firstEventID, + InitiatedID: initiatedEventID, + CancelRequestID: cancelRequestID, } e.pendingRequestCancelInfoIDs[initiatedEventID] = rci @@ -3092,7 +2651,7 @@ func (e *mutableStateBuilder) AddSignalExternalWorkflowExecutionInitiatedEvent( } event := e.hBuilder.AddSignalExternalWorkflowExecutionInitiatedEvent(decisionCompletedEventID, request) - si, err := e.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(event, signalRequestID) + si, err := e.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(decisionCompletedEventID, event, signalRequestID) if err != nil { return nil, nil, err } @@ -3107,6 +2666,7 @@ func (e *mutableStateBuilder) AddSignalExternalWorkflowExecutionInitiatedEvent( } func (e *mutableStateBuilder) ReplicateSignalExternalWorkflowExecutionInitiatedEvent( + firstEventID int64, event *workflow.HistoryEvent, signalRequestID string, ) (*persistence.SignalInfo, error) { @@ -3115,12 +2675,13 @@ func (e *mutableStateBuilder) ReplicateSignalExternalWorkflowExecutionInitiatedE initiatedEventID := event.GetEventId() attributes := 
event.SignalExternalWorkflowExecutionInitiatedEventAttributes si := &persistence.SignalInfo{ - Version: event.GetVersion(), - InitiatedID: initiatedEventID, - SignalRequestID: signalRequestID, - SignalName: attributes.GetSignalName(), - Input: attributes.Input, - Control: attributes.Control, + Version: event.GetVersion(), + InitiatedEventBatchID: firstEventID, + InitiatedID: initiatedEventID, + SignalRequestID: signalRequestID, + SignalName: attributes.GetSignalName(), + Input: attributes.Input, + Control: attributes.Control, } e.pendingSignalInfoIDs[initiatedEventID] = si @@ -4006,7 +3567,9 @@ func (e *mutableStateBuilder) AddChildWorkflowExecutionTimedOutEvent( return event, nil } -func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionTimedOutEvent(event *workflow.HistoryEvent) error { +func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionTimedOutEvent( + event *workflow.HistoryEvent, +) error { attributes := event.ChildWorkflowExecutionTimedOutEventAttributes initiatedID := attributes.GetInitiatedEventId() @@ -4017,6 +3580,7 @@ func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionTimedOutEvent(event func (e *mutableStateBuilder) RetryActivity( ai *persistence.ActivityInfo, failureReason string, + failureDetails []byte, ) (bool, error) { opTag := tag.WorkflowActionActivityTaskRetry @@ -4055,6 +3619,7 @@ func (e *mutableStateBuilder) RetryActivity( ai.TimerTaskStatus = TimerTaskStatusNone ai.LastFailureReason = failureReason ai.LastWorkerIdentity = ai.StartedIdentity + ai.LastFailureDetails = failureDetails if err := e.taskGenerator.generateActivityRetryTasks( ai.ScheduleID, diff --git a/service/history/mutableStateDecisionTaskManager.go b/service/history/mutableStateDecisionTaskManager.go new file mode 100644 index 00000000000..6ac17c16258 --- /dev/null +++ b/service/history/mutableStateDecisionTaskManager.go @@ -0,0 +1,728 @@ +// The MIT License (MIT) +// +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package history + +import ( + "fmt" + "math" + "time" + + workflow "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/errors" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/persistence" +) + +type ( + mutableStateDecisionTaskManager interface { + ReplicateDecisionTaskScheduledEvent( + version int64, + scheduleID int64, + taskList string, + startToCloseTimeoutSeconds int32, + attempt int64, + scheduleTimestamp int64, + originalScheduledTimestamp int64, + ) (*decisionInfo, error) + ReplicateTransientDecisionTaskScheduled() (*decisionInfo, error) + ReplicateDecisionTaskStartedEvent( + decision *decisionInfo, + version int64, + scheduleID int64, + startedID int64, + requestID string, + timestamp int64, + ) (*decisionInfo, error) + ReplicateDecisionTaskCompletedEvent(event *workflow.HistoryEvent) error + ReplicateDecisionTaskFailedEvent() error + ReplicateDecisionTaskTimedOutEvent(timeoutType workflow.TimeoutType) error + + AddDecisionTaskScheduleToStartTimeoutEvent(scheduleEventID int64) (*workflow.HistoryEvent, error) + AddDecisionTaskScheduledEventAsHeartbeat( + bypassTaskGeneration bool, + originalScheduledTimestamp int64, + ) (*decisionInfo, error) + AddDecisionTaskScheduledEvent(bypassTaskGeneration bool) (*decisionInfo, error) + AddFirstDecisionTaskScheduled(startEvent *workflow.HistoryEvent) error + AddDecisionTaskStartedEvent( + scheduleEventID int64, + requestID string, + request *workflow.PollForDecisionTaskRequest, + ) (*workflow.HistoryEvent, *decisionInfo, error) + AddDecisionTaskCompletedEvent( + scheduleEventID int64, + startedEventID int64, + request *workflow.RespondDecisionTaskCompletedRequest, + maxResetPoints int, + ) (*workflow.HistoryEvent, error) + AddDecisionTaskFailedEvent( + scheduleEventID int64, + startedEventID int64, + cause workflow.DecisionTaskFailedCause, + details []byte, + identity string, + reason string, + baseRunID string, + newRunID string, 
+ forkEventVersion int64, + ) (*workflow.HistoryEvent, error) + AddDecisionTaskTimedOutEvent(scheduleEventID int64, startedEventID int64) (*workflow.HistoryEvent, error) + + FailDecision(incrementAttempt bool) + DeleteDecision() + UpdateDecision(decision *decisionInfo) + + HasPendingDecision() bool + GetPendingDecision() (*decisionInfo, bool) + HasInFlightDecision() bool + GetInFlightDecision() (*decisionInfo, bool) + HasProcessedOrPendingDecision() bool + GetDecisionInfo(scheduleEventID int64) (*decisionInfo, bool) + + CreateTransientDecisionEvents(decision *decisionInfo, identity string) (*workflow.HistoryEvent, *workflow.HistoryEvent) + } + + mutableStateDecisionTaskManagerImpl struct { + msb *mutableStateBuilder + } +) + +func newMutableStateDecisionTaskManager(msb *mutableStateBuilder) mutableStateDecisionTaskManager { + return &mutableStateDecisionTaskManagerImpl{ + msb: msb, + } +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateDecisionTaskScheduledEvent( + version int64, + scheduleID int64, + taskList string, + startToCloseTimeoutSeconds int32, + attempt int64, + scheduleTimestamp int64, + originalScheduledTimestamp int64, +) (*decisionInfo, error) { + decision := &decisionInfo{ + Version: version, + ScheduleID: scheduleID, + StartedID: common.EmptyEventID, + RequestID: emptyUUID, + DecisionTimeout: startToCloseTimeoutSeconds, + TaskList: taskList, + Attempt: attempt, + ScheduledTimestamp: scheduleTimestamp, + StartedTimestamp: 0, + OriginalScheduledTimestamp: originalScheduledTimestamp, + } + + m.UpdateDecision(decision) + return decision, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateTransientDecisionTaskScheduled() (*decisionInfo, error) { + if m.HasPendingDecision() || m.msb.GetExecutionInfo().DecisionAttempt == 0 { + return nil, nil + } + + // the schedule ID for this decision is guaranteed to be wrong + // since the next event ID is assigned at the very end of when + // all events are applied for replication. 
+ // this is OK + // 1. if a failover happen just after this transient decision, + // AddDecisionTaskStartedEvent will handle the correction of schedule ID + // and set the attempt to 0 + // 2. if no failover happen during the life time of this transient decision + // then ReplicateDecisionTaskScheduledEvent will overwrite everything + // including the decision schedule ID + decision := &decisionInfo{ + Version: m.msb.GetCurrentVersion(), + ScheduleID: m.msb.GetNextEventID(), + StartedID: common.EmptyEventID, + RequestID: emptyUUID, + DecisionTimeout: m.msb.GetExecutionInfo().DecisionTimeoutValue, + TaskList: m.msb.GetExecutionInfo().TaskList, + Attempt: m.msb.GetExecutionInfo().DecisionAttempt, + ScheduledTimestamp: m.msb.timeSource.Now().UnixNano(), + StartedTimestamp: 0, + } + + m.UpdateDecision(decision) + return decision, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateDecisionTaskStartedEvent( + decision *decisionInfo, + version int64, + scheduleID int64, + startedID int64, + requestID string, + timestamp int64, +) (*decisionInfo, error) { + // Replicator calls it with a nil decision info, and it is safe to always lookup the decision in this case as it + // does not have to deal with transient decision case. + var ok bool + if decision == nil { + decision, ok = m.GetDecisionInfo(scheduleID) + if !ok { + return nil, errors.NewInternalFailureError(fmt.Sprintf("unable to find decision: %v", scheduleID)) + } + // setting decision attempt to 0 for decision task replication + // this mainly handles transient decision completion + // for transient decision, active side will write 2 batch in a "transaction" + // 1. decision task scheduled & decision task started + // 2. decision task completed & other events + // since we need to treat each individual event batch as one transaction + // certain "magic" needs to be done, i.e. 
setting attempt to 0 so + // if first batch is replicated, but not the second one, decision can be correctly timed out + decision.Attempt = 0 + } + + // set workflow state to running, since decision is scheduled + if state, _ := m.msb.GetWorkflowStateCloseStatus(); state == persistence.WorkflowStateCreated { + if err := m.msb.UpdateWorkflowStateCloseStatus( + persistence.WorkflowStateRunning, + persistence.WorkflowCloseStatusNone, + ); err != nil { + return nil, err + } + } + + // Update mutable decision state + decision = &decisionInfo{ + Version: version, + ScheduleID: scheduleID, + StartedID: startedID, + RequestID: requestID, + DecisionTimeout: decision.DecisionTimeout, + Attempt: decision.Attempt, + StartedTimestamp: timestamp, + ScheduledTimestamp: decision.ScheduledTimestamp, + TaskList: decision.TaskList, + OriginalScheduledTimestamp: decision.OriginalScheduledTimestamp, + } + + m.UpdateDecision(decision) + return decision, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateDecisionTaskCompletedEvent( + event *workflow.HistoryEvent, +) error { + m.beforeAddDecisionTaskCompletedEvent() + m.afterAddDecisionTaskCompletedEvent(event, math.MaxInt32) + return nil +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateDecisionTaskFailedEvent() error { + m.FailDecision(true) + return nil +} + +func (m *mutableStateDecisionTaskManagerImpl) ReplicateDecisionTaskTimedOutEvent( + timeoutType workflow.TimeoutType, +) error { + + incrementAttempt := true + // Do not increment decision attempt in the case of sticky timeout to prevent creating next decision as transient + if timeoutType == workflow.TimeoutTypeScheduleToStart { + incrementAttempt = false + } + m.FailDecision(incrementAttempt) + return nil +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskScheduleToStartTimeoutEvent( + scheduleEventID int64, +) (*workflow.HistoryEvent, error) { + opTag := tag.WorkflowActionDecisionTaskTimedOut + if m.msb.executionInfo.DecisionScheduleID 
!= scheduleEventID || m.msb.executionInfo.DecisionStartedID > 0 { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(scheduleEventID), + ) + return nil, m.msb.createInternalServerError(opTag) + } + + // Clear stickiness whenever decision fails + m.msb.ClearStickyness() + + event := m.msb.hBuilder.AddDecisionTaskTimedOutEvent(scheduleEventID, 0, workflow.TimeoutTypeScheduleToStart) + + if err := m.ReplicateDecisionTaskTimedOutEvent(workflow.TimeoutTypeScheduleToStart); err != nil { + return nil, err + } + return event, nil +} + +// originalScheduledTimestamp is to record the first scheduled decision during decision heartbeat. +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskScheduledEventAsHeartbeat( + bypassTaskGeneration bool, + originalScheduledTimestamp int64, +) (*decisionInfo, error) { + opTag := tag.WorkflowActionDecisionTaskScheduled + if m.HasPendingDecision() { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(m.msb.executionInfo.DecisionScheduleID)) + return nil, m.msb.createInternalServerError(opTag) + } + + // set workflow state to running + // since decision is scheduled + m.msb.executionInfo.State = persistence.WorkflowStateRunning + + // Tasklist and decision timeout should already be set from workflow execution started event + taskList := m.msb.executionInfo.TaskList + if m.msb.IsStickyTaskListEnabled() { + taskList = m.msb.executionInfo.StickyTaskList + } else { + // It can be because stickyness has expired due to StickyTTL config + // In that case we need to clear stickyness so that the LastUpdateTimestamp is not corrupted. + // In other cases, clearing stickyness shouldn't hurt anything. 
+ // TODO: https://github.com/uber/cadence/issues/2357: + // if we can use a new field(LastDecisionUpdateTimestamp), then we could get rid of it. + m.msb.ClearStickyness() + } + startToCloseTimeoutSeconds := m.msb.executionInfo.DecisionTimeoutValue + + // Flush any buffered events before creating the decision, otherwise it will result in invalid IDs for transient + // decision and will cause in timeout processing to not work for transient decisions + if m.msb.HasBufferedEvents() { + // if creating a decision and in the mean time events are flushed from buffered events + // than this decision cannot be a transient decision + m.msb.executionInfo.DecisionAttempt = 0 + if err := m.msb.FlushBufferedEvents(); err != nil { + return nil, err + } + } + + var newDecisionEvent *workflow.HistoryEvent + scheduleID := m.msb.GetNextEventID() // we will generate the schedule event later for repeatedly failing decisions + // Avoid creating new history events when decisions are continuously failing + scheduleTime := m.msb.timeSource.Now().UnixNano() + if m.msb.executionInfo.DecisionAttempt == 0 { + newDecisionEvent = m.msb.hBuilder.AddDecisionTaskScheduledEvent(taskList, startToCloseTimeoutSeconds, + m.msb.executionInfo.DecisionAttempt) + scheduleID = newDecisionEvent.GetEventId() + scheduleTime = newDecisionEvent.GetTimestamp() + } + + decision, err := m.ReplicateDecisionTaskScheduledEvent( + m.msb.GetCurrentVersion(), + scheduleID, + taskList, + startToCloseTimeoutSeconds, + m.msb.executionInfo.DecisionAttempt, + scheduleTime, + originalScheduledTimestamp, + ) + if err != nil { + return nil, err + } + + // TODO merge active & passive task generation + if !bypassTaskGeneration { + if err := m.msb.taskGenerator.generateDecisionScheduleTasks( + m.msb.unixNanoToTime(scheduleTime), // schedule time is now + scheduleID, + ); err != nil { + return nil, err + } + } + + return decision, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskScheduledEvent( + 
bypassTaskGeneration bool, +) (*decisionInfo, error) { + return m.AddDecisionTaskScheduledEventAsHeartbeat(bypassTaskGeneration, m.msb.timeSource.Now().UnixNano()) +} + +func (m *mutableStateDecisionTaskManagerImpl) AddFirstDecisionTaskScheduled( + startEvent *workflow.HistoryEvent, +) error { + + // handle first decision case, i.e. possible delayed decision + // + // below handles the following cases: + // 1. if not continue as new & if workflow has no parent + // -> schedule decision & schedule delayed decision + // 2. if not continue as new & if workflow has parent + // -> this function should not be called during workflow start, but should be called as + // part of schedule decision in 2 phase commit + // + // if continue as new + // 1. whether has parent workflow or not + // -> schedule decision & schedule delayed decision + // + startAttr := startEvent.WorkflowExecutionStartedEventAttributes + decisionBackoffDuration := time.Duration(startAttr.GetFirstDecisionTaskBackoffSeconds()) * time.Second + + var err error + if decisionBackoffDuration != 0 { + if err = m.msb.taskGenerator.generateDelayedDecisionTasks( + m.msb.unixNanoToTime(startEvent.GetTimestamp()), + startEvent, + ); err != nil { + return err + } + } else { + if _, err = m.AddDecisionTaskScheduledEvent( + false, + ); err != nil { + return err + } + } + + return nil +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskStartedEvent( + scheduleEventID int64, + requestID string, + request *workflow.PollForDecisionTaskRequest, +) (*workflow.HistoryEvent, *decisionInfo, error) { + opTag := tag.WorkflowActionDecisionTaskStarted + decision, ok := m.GetDecisionInfo(scheduleEventID) + if !ok || decision.StartedID != common.EmptyEventID { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(scheduleEventID)) + return nil, nil, m.msb.createInternalServerError(opTag) + } + + var 
event *workflow.HistoryEvent + scheduleID := decision.ScheduleID + startedID := scheduleID + 1 + tasklist := request.TaskList.GetName() + startTime := m.msb.timeSource.Now().UnixNano() + // First check to see if new events came since transient decision was scheduled + if decision.Attempt > 0 && decision.ScheduleID != m.msb.GetNextEventID() { + // Also create a new DecisionTaskScheduledEvent since new events came in when it was scheduled + scheduleEvent := m.msb.hBuilder.AddDecisionTaskScheduledEvent(tasklist, decision.DecisionTimeout, 0) + scheduleID = scheduleEvent.GetEventId() + decision.Attempt = 0 + } + + // Avoid creating new history events when decisions are continuously failing + if decision.Attempt == 0 { + // Now create DecisionTaskStartedEvent + event = m.msb.hBuilder.AddDecisionTaskStartedEvent(scheduleID, requestID, request.GetIdentity()) + startedID = event.GetEventId() + startTime = event.GetTimestamp() + } + + decision, err := m.ReplicateDecisionTaskStartedEvent(decision, m.msb.GetCurrentVersion(), scheduleID, startedID, requestID, startTime) + // TODO merge active & passive task generation + if err := m.msb.taskGenerator.generateDecisionStartTasks( + m.msb.unixNanoToTime(startTime), // start time is now + scheduleID, + ); err != nil { + return nil, nil, err + } + return event, decision, err +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskCompletedEvent( + scheduleEventID int64, + startedEventID int64, + request *workflow.RespondDecisionTaskCompletedRequest, + maxResetPoints int, +) (*workflow.HistoryEvent, error) { + opTag := tag.WorkflowActionDecisionTaskCompleted + decision, ok := m.GetDecisionInfo(scheduleEventID) + if !ok || decision.StartedID != startedEventID { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(scheduleEventID), + tag.WorkflowStartedID(startedEventID)) + + return nil, 
m.msb.createInternalServerError(opTag) + } + + m.beforeAddDecisionTaskCompletedEvent() + if decision.Attempt > 0 { + // Create corresponding DecisionTaskSchedule and DecisionTaskStarted events for decisions we have been retrying + scheduledEvent := m.msb.hBuilder.AddTransientDecisionTaskScheduledEvent(m.msb.executionInfo.TaskList, decision.DecisionTimeout, + decision.Attempt, decision.ScheduledTimestamp) + startedEvent := m.msb.hBuilder.AddTransientDecisionTaskStartedEvent(scheduledEvent.GetEventId(), decision.RequestID, + request.GetIdentity(), decision.StartedTimestamp) + startedEventID = startedEvent.GetEventId() + } + // Now write the completed event + event := m.msb.hBuilder.AddDecisionTaskCompletedEvent(scheduleEventID, startedEventID, request) + + m.afterAddDecisionTaskCompletedEvent(event, maxResetPoints) + return event, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskFailedEvent( + scheduleEventID int64, + startedEventID int64, + cause workflow.DecisionTaskFailedCause, + details []byte, + identity string, + reason string, + baseRunID string, + newRunID string, + forkEventVersion int64, +) (*workflow.HistoryEvent, error) { + opTag := tag.WorkflowActionDecisionTaskFailed + attr := workflow.DecisionTaskFailedEventAttributes{ + ScheduledEventId: common.Int64Ptr(scheduleEventID), + StartedEventId: common.Int64Ptr(startedEventID), + Cause: common.DecisionTaskFailedCausePtr(cause), + Details: details, + Identity: common.StringPtr(identity), + Reason: common.StringPtr(reason), + BaseRunId: common.StringPtr(baseRunID), + NewRunId: common.StringPtr(newRunID), + ForkEventVersion: common.Int64Ptr(forkEventVersion), + } + + dt, ok := m.GetDecisionInfo(scheduleEventID) + if !ok || dt.StartedID != startedEventID { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(scheduleEventID), + tag.WorkflowStartedID(startedEventID)) + 
return nil, m.msb.createInternalServerError(opTag) + } + + var event *workflow.HistoryEvent + // Only emit DecisionTaskFailedEvent for the very first time + if dt.Attempt == 0 || cause == workflow.DecisionTaskFailedCauseResetWorkflow { + event = m.msb.hBuilder.AddDecisionTaskFailedEvent(attr) + } + + if err := m.ReplicateDecisionTaskFailedEvent(); err != nil { + return nil, err + } + + // always clear decision attempt for reset + if cause == workflow.DecisionTaskFailedCauseResetWorkflow { + m.msb.executionInfo.DecisionAttempt = 0 + } + return event, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) AddDecisionTaskTimedOutEvent( + scheduleEventID int64, + startedEventID int64, +) (*workflow.HistoryEvent, error) { + opTag := tag.WorkflowActionDecisionTaskTimedOut + dt, ok := m.GetDecisionInfo(scheduleEventID) + if !ok || dt.StartedID != startedEventID { + m.msb.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, + tag.WorkflowEventID(m.msb.GetNextEventID()), + tag.ErrorTypeInvalidHistoryAction, + tag.WorkflowScheduleID(scheduleEventID), + tag.WorkflowStartedID(startedEventID)) + return nil, m.msb.createInternalServerError(opTag) + } + + var event *workflow.HistoryEvent + // Avoid creating new history events when decisions are continuously timing out + if dt.Attempt == 0 { + event = m.msb.hBuilder.AddDecisionTaskTimedOutEvent(scheduleEventID, startedEventID, workflow.TimeoutTypeStartToClose) + } + + if err := m.ReplicateDecisionTaskTimedOutEvent(workflow.TimeoutTypeStartToClose); err != nil { + return nil, err + } + return event, nil +} + +func (m *mutableStateDecisionTaskManagerImpl) FailDecision( + incrementAttempt bool, +) { + // Clear stickiness whenever decision fails + m.msb.ClearStickyness() + + failDecisionInfo := &decisionInfo{ + Version: common.EmptyVersion, + ScheduleID: common.EmptyEventID, + StartedID: common.EmptyEventID, + RequestID: emptyUUID, + DecisionTimeout: 0, + StartedTimestamp: 0, + TaskList: "", + OriginalScheduledTimestamp: 0, + } + 
if incrementAttempt { + failDecisionInfo.Attempt = m.msb.executionInfo.DecisionAttempt + 1 + failDecisionInfo.ScheduledTimestamp = m.msb.timeSource.Now().UnixNano() + } + m.UpdateDecision(failDecisionInfo) +} + +// DeleteDecision deletes a decision task. +func (m *mutableStateDecisionTaskManagerImpl) DeleteDecision() { + resetDecisionInfo := &decisionInfo{ + Version: common.EmptyVersion, + ScheduleID: common.EmptyEventID, + StartedID: common.EmptyEventID, + RequestID: emptyUUID, + DecisionTimeout: 0, + Attempt: 0, + StartedTimestamp: 0, + ScheduledTimestamp: 0, + TaskList: "", + // Keep the last original scheduled timestamp, so that AddDecisionAsHeartbeat can continue with it. + OriginalScheduledTimestamp: m.getDecisionInfo().OriginalScheduledTimestamp, + } + m.UpdateDecision(resetDecisionInfo) +} + +// UpdateDecision updates a decision task. +func (m *mutableStateDecisionTaskManagerImpl) UpdateDecision( + decision *decisionInfo, +) { + + m.msb.executionInfo.DecisionVersion = decision.Version + m.msb.executionInfo.DecisionScheduleID = decision.ScheduleID + m.msb.executionInfo.DecisionStartedID = decision.StartedID + m.msb.executionInfo.DecisionRequestID = decision.RequestID + m.msb.executionInfo.DecisionTimeout = decision.DecisionTimeout + m.msb.executionInfo.DecisionAttempt = decision.Attempt + m.msb.executionInfo.DecisionStartedTimestamp = decision.StartedTimestamp + m.msb.executionInfo.DecisionScheduledTimestamp = decision.ScheduledTimestamp + m.msb.executionInfo.DecisionOriginalScheduledTimestamp = decision.OriginalScheduledTimestamp + + // NOTE: do not update tasklist in execution info + + m.msb.logger.Debug(fmt.Sprintf( + "Decision Updated: {Schedule: %v, Started: %v, ID: %v, Timeout: %v, Attempt: %v, Timestamp: %v}", + decision.ScheduleID, + decision.StartedID, + decision.RequestID, + decision.DecisionTimeout, + decision.Attempt, + decision.StartedTimestamp, + )) +} + +func (m *mutableStateDecisionTaskManagerImpl) HasPendingDecision() bool { + return 
m.msb.executionInfo.DecisionScheduleID != common.EmptyEventID +} + +func (m *mutableStateDecisionTaskManagerImpl) GetPendingDecision() (*decisionInfo, bool) { + if m.msb.executionInfo.DecisionScheduleID == common.EmptyEventID { + return nil, false + } + + decision := m.getDecisionInfo() + return decision, true +} + +func (m *mutableStateDecisionTaskManagerImpl) HasInFlightDecision() bool { + return m.msb.executionInfo.DecisionStartedID > 0 +} + +func (m *mutableStateDecisionTaskManagerImpl) GetInFlightDecision() (*decisionInfo, bool) { + if m.msb.executionInfo.DecisionScheduleID == common.EmptyEventID || + m.msb.executionInfo.DecisionStartedID == common.EmptyEventID { + return nil, false + } + + decision := m.getDecisionInfo() + return decision, true +} + +func (m *mutableStateDecisionTaskManagerImpl) HasProcessedOrPendingDecision() bool { + return m.HasPendingDecision() || m.msb.GetPreviousStartedEventID() != common.EmptyEventID +} + +// GetDecisionInfo returns details about the in-progress decision task +func (m *mutableStateDecisionTaskManagerImpl) GetDecisionInfo( + scheduleEventID int64, +) (*decisionInfo, bool) { + decision := m.getDecisionInfo() + if scheduleEventID == decision.ScheduleID { + return decision, true + } + return nil, false +} + +func (m *mutableStateDecisionTaskManagerImpl) CreateTransientDecisionEvents( + decision *decisionInfo, + identity string, +) (*workflow.HistoryEvent, *workflow.HistoryEvent) { + tasklist := m.msb.executionInfo.TaskList + scheduledEvent := newDecisionTaskScheduledEventWithInfo( + decision.ScheduleID, + decision.ScheduledTimestamp, + tasklist, + decision.DecisionTimeout, + decision.Attempt, + ) + + startedEvent := newDecisionTaskStartedEventWithInfo( + decision.StartedID, + decision.StartedTimestamp, + decision.ScheduleID, + decision.RequestID, + identity, + ) + + return scheduledEvent, startedEvent +} + +func (m *mutableStateDecisionTaskManagerImpl) getDecisionInfo() *decisionInfo { + taskList := 
m.msb.executionInfo.TaskList + if m.msb.IsStickyTaskListEnabled() { + taskList = m.msb.executionInfo.StickyTaskList + } + return &decisionInfo{ + Version: m.msb.executionInfo.DecisionVersion, + ScheduleID: m.msb.executionInfo.DecisionScheduleID, + StartedID: m.msb.executionInfo.DecisionStartedID, + RequestID: m.msb.executionInfo.DecisionRequestID, + DecisionTimeout: m.msb.executionInfo.DecisionTimeout, + Attempt: m.msb.executionInfo.DecisionAttempt, + StartedTimestamp: m.msb.executionInfo.DecisionStartedTimestamp, + ScheduledTimestamp: m.msb.executionInfo.DecisionScheduledTimestamp, + TaskList: taskList, + OriginalScheduledTimestamp: m.msb.executionInfo.DecisionOriginalScheduledTimestamp, + } +} + +func (m *mutableStateDecisionTaskManagerImpl) beforeAddDecisionTaskCompletedEvent() { + // Make sure to delete decision before adding events. Otherwise they are buffered rather than getting appended + m.DeleteDecision() +} + +func (m *mutableStateDecisionTaskManagerImpl) afterAddDecisionTaskCompletedEvent( + event *workflow.HistoryEvent, + maxResetPoints int, +) { + m.msb.executionInfo.LastProcessedEvent = event.GetDecisionTaskCompletedEventAttributes().GetStartedEventId() + m.msb.addBinaryCheckSumIfNotExists(event, maxResetPoints) +} diff --git a/service/history/mutableStateDecisionTaskManager_mock.go b/service/history/mutableStateDecisionTaskManager_mock.go new file mode 100644 index 00000000000..e04cb85ed4b --- /dev/null +++ b/service/history/mutableStateDecisionTaskManager_mock.go @@ -0,0 +1,403 @@ +// The MIT License (MIT) +// +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: service/history/mutableStateDecisionTaskManager.go +//// go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination mutableStateDecisionTaskManager_mock.go + +// Package history is a generated GoMock package. 
+package history + +import ( + gomock "github.com/golang/mock/gomock" + shared "github.com/uber/cadence/.gen/go/shared" + reflect "reflect" +) + +// MockmutableStateDecisionTaskManager is a mock of mutableStateDecisionTaskManager interface +type MockmutableStateDecisionTaskManager struct { + ctrl *gomock.Controller + recorder *MockmutableStateDecisionTaskManagerMockRecorder +} + +// MockmutableStateDecisionTaskManagerMockRecorder is the mock recorder for MockmutableStateDecisionTaskManager +type MockmutableStateDecisionTaskManagerMockRecorder struct { + mock *MockmutableStateDecisionTaskManager +} + +// NewMockmutableStateDecisionTaskManager creates a new mock instance +func NewMockmutableStateDecisionTaskManager(ctrl *gomock.Controller) *MockmutableStateDecisionTaskManager { + mock := &MockmutableStateDecisionTaskManager{ctrl: ctrl} + mock.recorder = &MockmutableStateDecisionTaskManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockmutableStateDecisionTaskManager) EXPECT() *MockmutableStateDecisionTaskManagerMockRecorder { + return m.recorder +} + +// ReplicateDecisionTaskScheduledEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateDecisionTaskScheduledEvent(version, scheduleID int64, taskList string, startToCloseTimeoutSeconds int32, attempt, scheduleTimestamp, originalScheduledTimestamp int64) (*decisionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateDecisionTaskScheduledEvent", version, scheduleID, taskList, startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp) + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReplicateDecisionTaskScheduledEvent indicates an expected call of ReplicateDecisionTaskScheduledEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateDecisionTaskScheduledEvent(version, scheduleID, taskList, 
startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateDecisionTaskScheduledEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateDecisionTaskScheduledEvent), version, scheduleID, taskList, startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp) +} + +// ReplicateTransientDecisionTaskScheduled mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateTransientDecisionTaskScheduled() (*decisionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateTransientDecisionTaskScheduled") + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReplicateTransientDecisionTaskScheduled indicates an expected call of ReplicateTransientDecisionTaskScheduled +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateTransientDecisionTaskScheduled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateTransientDecisionTaskScheduled", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateTransientDecisionTaskScheduled)) +} + +// ReplicateDecisionTaskFailedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateDecisionTaskFailedEvent() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateDecisionTaskFailedEvent") + ret0, _ := ret[0].(error) + return ret0 +} + +// ReplicateDecisionTaskFailedEvent indicates an expected call of ReplicateDecisionTaskFailedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateDecisionTaskFailedEvent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateDecisionTaskFailedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateDecisionTaskFailedEvent)) +} + +// 
ReplicateDecisionTaskTimedOutEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateDecisionTaskTimedOutEvent(timeoutType shared.TimeoutType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateDecisionTaskTimedOutEvent", timeoutType) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReplicateDecisionTaskTimedOutEvent indicates an expected call of ReplicateDecisionTaskTimedOutEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateDecisionTaskTimedOutEvent(timeoutType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateDecisionTaskTimedOutEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateDecisionTaskTimedOutEvent), timeoutType) +} + +// ReplicateDecisionTaskCompletedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateDecisionTaskCompletedEvent(event *shared.HistoryEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateDecisionTaskCompletedEvent", event) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReplicateDecisionTaskCompletedEvent indicates an expected call of ReplicateDecisionTaskCompletedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateDecisionTaskCompletedEvent(event interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateDecisionTaskCompletedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateDecisionTaskCompletedEvent), event) +} + +// ReplicateDecisionTaskStartedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) ReplicateDecisionTaskStartedEvent(decision *decisionInfo, version, scheduleID, startedID int64, requestID string, timestamp int64) (*decisionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReplicateDecisionTaskStartedEvent", decision, version, scheduleID, startedID, requestID, timestamp) + ret0, _ := 
ret[0].(*decisionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReplicateDecisionTaskStartedEvent indicates an expected call of ReplicateDecisionTaskStartedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) ReplicateDecisionTaskStartedEvent(decision, version, scheduleID, startedID, requestID, timestamp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateDecisionTaskStartedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).ReplicateDecisionTaskStartedEvent), decision, version, scheduleID, startedID, requestID, timestamp) +} + +// AddDecisionTaskFailedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskFailedEvent(scheduleEventID, startedEventID int64, cause shared.DecisionTaskFailedCause, details []byte, identity, reason, baseRunID, newRunID string, forkEventVersion int64) (*shared.HistoryEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskFailedEvent", scheduleEventID, startedEventID, cause, details, identity, reason, baseRunID, newRunID, forkEventVersion) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskFailedEvent indicates an expected call of AddDecisionTaskFailedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskFailedEvent(scheduleEventID, startedEventID, cause, details, identity, reason, baseRunID, newRunID, forkEventVersion interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskFailedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskFailedEvent), scheduleEventID, startedEventID, cause, details, identity, reason, baseRunID, newRunID, forkEventVersion) +} + +// AddDecisionTaskScheduleToStartTimeoutEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) 
AddDecisionTaskScheduleToStartTimeoutEvent(scheduleEventID int64) (*shared.HistoryEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskScheduleToStartTimeoutEvent", scheduleEventID) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskScheduleToStartTimeoutEvent indicates an expected call of AddDecisionTaskScheduleToStartTimeoutEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskScheduleToStartTimeoutEvent(scheduleEventID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskScheduleToStartTimeoutEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskScheduleToStartTimeoutEvent), scheduleEventID) +} + +// AddDecisionTaskTimedOutEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskTimedOutEvent(scheduleEventID, startedEventID int64) (*shared.HistoryEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskTimedOutEvent", scheduleEventID, startedEventID) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskTimedOutEvent indicates an expected call of AddDecisionTaskTimedOutEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskTimedOutEvent(scheduleEventID, startedEventID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskTimedOutEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskTimedOutEvent), scheduleEventID, startedEventID) +} + +// AddDecisionTaskCompletedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskCompletedEvent(scheduleEventID, startedEventID int64, request *shared.RespondDecisionTaskCompletedRequest, maxResetPoints int) (*shared.HistoryEvent, error) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskCompletedEvent", scheduleEventID, startedEventID, request, maxResetPoints) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskCompletedEvent indicates an expected call of AddDecisionTaskCompletedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskCompletedEvent(scheduleEventID, startedEventID, request, maxResetPoints interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskCompletedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskCompletedEvent), scheduleEventID, startedEventID, request, maxResetPoints) +} + +// AddDecisionTaskStartedEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskStartedEvent(scheduleEventID int64, requestID string, request *shared.PollForDecisionTaskRequest) (*shared.HistoryEvent, *decisionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskStartedEvent", scheduleEventID, requestID, request) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(*decisionInfo) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AddDecisionTaskStartedEvent indicates an expected call of AddDecisionTaskStartedEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskStartedEvent(scheduleEventID, requestID, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskStartedEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskStartedEvent), scheduleEventID, requestID, request) +} + +// AddDecisionTaskScheduledEventAsHeartbeat mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskScheduledEventAsHeartbeat(bypassTaskGeneration bool, originalScheduledTimestamp int64) (*decisionInfo, error) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskScheduledEventAsHeartbeat", bypassTaskGeneration, originalScheduledTimestamp) + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskScheduledEventAsHeartbeat indicates an expected call of AddDecisionTaskScheduledEventAsHeartbeat +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskScheduledEventAsHeartbeat(bypassTaskGeneration, originalScheduledTimestamp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskScheduledEventAsHeartbeat", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskScheduledEventAsHeartbeat), bypassTaskGeneration, originalScheduledTimestamp) +} + +// AddDecisionTaskScheduledEvent mocks base method +func (m *MockmutableStateDecisionTaskManager) AddDecisionTaskScheduledEvent(bypassTaskGeneration bool) (*decisionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDecisionTaskScheduledEvent", bypassTaskGeneration) + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDecisionTaskScheduledEvent indicates an expected call of AddDecisionTaskScheduledEvent +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddDecisionTaskScheduledEvent(bypassTaskGeneration interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDecisionTaskScheduledEvent", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddDecisionTaskScheduledEvent), bypassTaskGeneration) +} + +// AddFirstDecisionTaskScheduled mocks base method +func (m *MockmutableStateDecisionTaskManager) AddFirstDecisionTaskScheduled(startEvent *shared.HistoryEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddFirstDecisionTaskScheduled", startEvent) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddFirstDecisionTaskScheduled indicates an expected 
call of AddFirstDecisionTaskScheduled +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) AddFirstDecisionTaskScheduled(startEvent interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddFirstDecisionTaskScheduled", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).AddFirstDecisionTaskScheduled), startEvent) +} + +// FailDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) FailDecision(incrementAttempt bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "FailDecision", incrementAttempt) +} + +// FailDecision indicates an expected call of FailDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) FailDecision(incrementAttempt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).FailDecision), incrementAttempt) +} + +// DeleteDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) DeleteDecision() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteDecision") +} + +// DeleteDecision indicates an expected call of DeleteDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) DeleteDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).DeleteDecision)) +} + +// UpdateDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) UpdateDecision(decision *decisionInfo) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateDecision", decision) +} + +// UpdateDecision indicates an expected call of UpdateDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) UpdateDecision(decision interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDecision", 
reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).UpdateDecision), decision) +} + +// HasPendingDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) HasPendingDecision() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasPendingDecision") + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasPendingDecision indicates an expected call of HasPendingDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) HasPendingDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasPendingDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).HasPendingDecision)) +} + +// GetPendingDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) GetPendingDecision() (*decisionInfo, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingDecision") + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetPendingDecision indicates an expected call of GetPendingDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) GetPendingDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).GetPendingDecision)) +} + +// HasInFlightDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) HasInFlightDecision() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasInFlightDecision") + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasInFlightDecision indicates an expected call of HasInFlightDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) HasInFlightDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasInFlightDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).HasInFlightDecision)) +} + +// GetInFlightDecision mocks base method +func (m 
*MockmutableStateDecisionTaskManager) GetInFlightDecision() (*decisionInfo, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInFlightDecision") + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetInFlightDecision indicates an expected call of GetInFlightDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) GetInFlightDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInFlightDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).GetInFlightDecision)) +} + +// HasProcessedOrPendingDecision mocks base method +func (m *MockmutableStateDecisionTaskManager) HasProcessedOrPendingDecision() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasProcessedOrPendingDecision") + ret0, _ := ret[0].(bool) + return ret0 +} + +// HasProcessedOrPendingDecision indicates an expected call of HasProcessedOrPendingDecision +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) HasProcessedOrPendingDecision() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasProcessedOrPendingDecision", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).HasProcessedOrPendingDecision)) +} + +// GetDecisionInfo mocks base method +func (m *MockmutableStateDecisionTaskManager) GetDecisionInfo(scheduleEventID int64) (*decisionInfo, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDecisionInfo", scheduleEventID) + ret0, _ := ret[0].(*decisionInfo) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetDecisionInfo indicates an expected call of GetDecisionInfo +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) GetDecisionInfo(scheduleEventID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDecisionInfo", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).GetDecisionInfo), scheduleEventID) +} + +// 
CreateTransientDecisionEvents mocks base method +func (m *MockmutableStateDecisionTaskManager) CreateTransientDecisionEvents(decision *decisionInfo, identity string) (*shared.HistoryEvent, *shared.HistoryEvent) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTransientDecisionEvents", decision, identity) + ret0, _ := ret[0].(*shared.HistoryEvent) + ret1, _ := ret[1].(*shared.HistoryEvent) + return ret0, ret1 +} + +// CreateTransientDecisionEvents indicates an expected call of CreateTransientDecisionEvents +func (mr *MockmutableStateDecisionTaskManagerMockRecorder) CreateTransientDecisionEvents(decision, identity interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTransientDecisionEvents", reflect.TypeOf((*MockmutableStateDecisionTaskManager)(nil).CreateTransientDecisionEvents), decision, identity) +} diff --git a/service/history/nDCBranchMgr.go b/service/history/nDCBranchMgr.go index 609b5b7fe65..0a4520815a3 100644 --- a/service/history/nDCBranchMgr.go +++ b/service/history/nDCBranchMgr.go @@ -165,7 +165,7 @@ func (r *nDCBranchMgrImpl) createNewBranch( resp, err := r.historyV2Mgr.ForkHistoryBranch(&persistence.ForkHistoryBranchRequest{ ForkBranchToken: baseBranchToken, ForkNodeID: baseBranchLastEventID + 1, - Info: historyGarbageCleanupInfo(domainID, workflowID, uuid.New()), + Info: persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, uuid.New()), ShardID: common.IntPtr(shardID), }) if err != nil { diff --git a/service/history/nDCWorkflowResetter.go b/service/history/nDCWorkflowResetter.go index 75567d96232..afaedf449b7 100644 --- a/service/history/nDCWorkflowResetter.go +++ b/service/history/nDCWorkflowResetter.go @@ -106,7 +106,7 @@ func (r *nDCWorkflowResetterImpl) resetWorkflow( resp, err := r.historyV2Mgr.ForkHistoryBranch(&persistence.ForkHistoryBranchRequest{ ForkBranchToken: baseBranchToken, ForkNodeID: baseEventID + 1, - Info: historyGarbageCleanupInfo(r.domainID, 
r.workflowID, r.newRunID), + Info: persistence.BuildHistoryGarbageCleanupInfo(r.domainID, r.workflowID, r.newRunID), ShardID: common.IntPtr(shardID), }) if err != nil { diff --git a/service/history/nDCWorkflowResetter_test.go b/service/history/nDCWorkflowResetter_test.go index a03f57d1764..3cc0df86eb8 100644 --- a/service/history/nDCWorkflowResetter_test.go +++ b/service/history/nDCWorkflowResetter_test.go @@ -184,7 +184,7 @@ func (s *nDCWorkflowResetterSuite) TestResetWorkflow() { s.mockHistoryV2Mgr.On("ForkHistoryBranch", &persistence.ForkHistoryBranchRequest{ ForkBranchToken: branchToken, ForkNodeID: baseEventID + 1, - Info: historyGarbageCleanupInfo(s.domainID, s.workflowID, s.newRunID), + Info: persistence.BuildHistoryGarbageCleanupInfo(s.domainID, s.workflowID, s.newRunID), ShardID: common.IntPtr(s.mockShard.GetShardID()), }).Return(&persistence.ForkHistoryBranchResponse{NewBranchToken: newBranchToken}, nil).Times(1) s.mockHistoryV2Mgr.On("CompleteForkBranch", &persistence.CompleteForkBranchRequest{ diff --git a/service/history/replicatorQueueProcessor.go b/service/history/replicatorQueueProcessor.go index ed3c913291f..8c9bee6ef95 100644 --- a/service/history/replicatorQueueProcessor.go +++ b/service/history/replicatorQueueProcessor.go @@ -636,6 +636,7 @@ func (p *replicatorQueueProcessorImpl) generateSyncActivityTask( Attempt: common.Int32Ptr(activityInfo.Attempt), LastFailureReason: common.StringPtr(activityInfo.LastFailureReason), LastWorkerIdentity: common.StringPtr(activityInfo.LastWorkerIdentity), + LastFailureDetails: activityInfo.LastFailureDetails, }, }, nil }, diff --git a/service/history/replicatorQueueProcessor_test.go b/service/history/replicatorQueueProcessor_test.go index 15f1cfb47de..35c60f3b535 100644 --- a/service/history/replicatorQueueProcessor_test.go +++ b/service/history/replicatorQueueProcessor_test.go @@ -291,6 +291,7 @@ func (s *replicatorQueueProcessorSuite) TestSyncActivity_ActivityRetry() { activityDetails := []byte("some random 
activity progress") activityLastFailureReason := "some random reason" activityLastWorkerIdentity := "some random worker identity" + activityLastFailureDetails := []byte("some random failure details") msBuilder.On("IsWorkflowExecutionRunning").Return(true) msBuilder.On("GetReplicationState").Return(&persistence.ReplicationState{ @@ -313,6 +314,7 @@ func (s *replicatorQueueProcessorSuite) TestSyncActivity_ActivityRetry() { Attempt: activityAttempt, LastFailureReason: activityLastFailureReason, LastWorkerIdentity: activityLastWorkerIdentity, + LastFailureDetails: activityLastFailureDetails, }, true) s.mockMetadataMgr.On("GetDomain", &persistence.GetDomainRequest{ID: domainID}).Return( &persistence.GetDomainResponse{ @@ -345,6 +347,7 @@ func (s *replicatorQueueProcessorSuite) TestSyncActivity_ActivityRetry() { Attempt: common.Int32Ptr(activityAttempt), LastFailureReason: common.StringPtr(activityLastFailureReason), LastWorkerIdentity: common.StringPtr(activityLastWorkerIdentity), + LastFailureDetails: activityLastFailureDetails, }, }).Return(nil).Once() @@ -392,6 +395,7 @@ func (s *replicatorQueueProcessorSuite) TestSyncActivity_ActivityRunning() { activityDetails := []byte("some random activity progress") activityLastFailureReason := "some random reason" activityLastWorkerIdentity := "some random worker identity" + activityLastFailureDetails := []byte("some random failure details") msBuilder.On("IsWorkflowExecutionRunning").Return(true) msBuilder.On("GetReplicationState").Return(&persistence.ReplicationState{ @@ -414,6 +418,7 @@ func (s *replicatorQueueProcessorSuite) TestSyncActivity_ActivityRunning() { Attempt: activityAttempt, LastFailureReason: activityLastFailureReason, LastWorkerIdentity: activityLastWorkerIdentity, + LastFailureDetails: activityLastFailureDetails, }, true) s.mockMetadataMgr.On("GetDomain", &persistence.GetDomainRequest{ID: domainID}).Return( &persistence.GetDomainResponse{ @@ -446,6 +451,7 @@ func (s *replicatorQueueProcessorSuite) 
TestSyncActivity_ActivityRunning() { Attempt: common.Int32Ptr(activityAttempt), LastFailureReason: common.StringPtr(activityLastFailureReason), LastWorkerIdentity: common.StringPtr(activityLastWorkerIdentity), + LastFailureDetails: activityLastFailureDetails, }, }).Return(nil).Once() diff --git a/service/history/service.go b/service/history/service.go index 1ef1fc345fd..847caf58674 100644 --- a/service/history/service.go +++ b/service/history/service.go @@ -139,6 +139,13 @@ type Config struct { EnableEventsV2 dynamicconfig.BoolPropertyFnWithDomainFilter // whether or not using Terminate as default ParentClosePolicy, otherwise use Abandon for backward compatibility UseTerminateAsDefaultParentClosePolicy dynamicconfig.BoolPropertyFnWithDomainFilter + // whether or not enable system workers for processing parent close policy task + EnableParentClosePolicyWorker dynamicconfig.BoolPropertyFn + // parent close policy will be processed by sys workers(if enabled) if + // the number of children greater than or equal to this threshold + ParentClosePolicyThreshold dynamicconfig.IntPropertyFnWithDomainFilter + // total number of parentClosePolicy system workflows + NumParentClosePolicySystemWorkflows dynamicconfig.IntPropertyFn // Archival settings NumArchiveSystemWorkflows dynamicconfig.IntPropertyFn @@ -246,6 +253,9 @@ func NewConfig(dc *dynamicconfig.Collection, numberOfShards int, storeType strin EventEncodingType: dc.GetStringPropertyFnWithDomainFilter(dynamicconfig.DefaultEventEncoding, string(common.EncodingTypeThriftRW)), EnableEventsV2: dc.GetBoolPropertyFnWithDomainFilter(dynamicconfig.EnableEventsV2, true), UseTerminateAsDefaultParentClosePolicy: dc.GetBoolPropertyFnWithDomainFilter(dynamicconfig.UseTerminateAsDefaultParentClosePolicy, false), + NumParentClosePolicySystemWorkflows: dc.GetIntProperty(dynamicconfig.NumParentClosePolicySystemWorkflows, 10), + EnableParentClosePolicyWorker: dc.GetBoolProperty(dynamicconfig.EnableParentClosePolicyWorker, true), + 
ParentClosePolicyThreshold: dc.GetIntPropertyFilteredByDomain(dynamicconfig.ParentClosePolicyThreshold, 10), NumArchiveSystemWorkflows: dc.GetIntProperty(dynamicconfig.NumArchiveSystemWorkflows, 1000), ArchiveRequestRPS: dc.GetIntProperty(dynamicconfig.ArchiveRequestRPS, 300), // should be much smaller than frontend RPS diff --git a/service/history/stateBuilder.go b/service/history/stateBuilder.go index 7538214721e..3019652a16d 100644 --- a/service/history/stateBuilder.go +++ b/service/history/stateBuilder.go @@ -409,7 +409,7 @@ func (b *stateBuilderImpl) applyEvents( case shared.EventTypeRequestCancelExternalWorkflowExecutionInitiated: // Create a new request ID which is used by transfer queue processor if domain is failed over at this point cancelRequestID := uuid.New() - rci, err := b.msBuilder.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(event, cancelRequestID) + rci, err := b.msBuilder.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(firstEvent.GetEventId(), event, cancelRequestID) if err != nil { return nil, nil, nil, err } @@ -440,7 +440,7 @@ func (b *stateBuilderImpl) applyEvents( case shared.EventTypeSignalExternalWorkflowExecutionInitiated: // Create a new request ID which is used by transfer queue processor if domain is failed over at this point signalRequestID := uuid.New() - si, err := b.msBuilder.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(event, signalRequestID) + si, err := b.msBuilder.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(firstEvent.GetEventId(), event, signalRequestID) if err != nil { return nil, nil, nil, err } diff --git a/service/history/stateBuilder_test.go b/service/history/stateBuilder_test.go index 1b51a69f51e..292006f8ada 100644 --- a/service/history/stateBuilder_test.go +++ b/service/history/stateBuilder_test.go @@ -1241,7 +1241,7 @@ func (s *stateBuilderSuite) TestApplyEvents_EventTypeSignalExternalWorkflowExecu } // the cancellation request ID is generated inside, cannot assert 
equal - s.mockMutableState.On("ReplicateSignalExternalWorkflowExecutionInitiatedEvent", event, mock.Anything).Return(si, nil).Once() + s.mockMutableState.On("ReplicateSignalExternalWorkflowExecutionInitiatedEvent", event.GetEventId(), event, mock.Anything).Return(si, nil).Once() s.mockMetadataMgr.On("GetDomain", &persistence.GetDomainRequest{Name: targetDomain}).Return( &persistence.GetDomainResponse{ Info: &persistence.DomainInfo{ID: targetDomainID, Name: targetDomain}, @@ -1348,7 +1348,7 @@ func (s *stateBuilderSuite) TestApplyEvents_EventTypeRequestCancelExternalWorkfl } // the cancellation request ID is generated inside, cannot assert equal - s.mockMutableState.On("ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent", event, mock.Anything).Return(rci, nil).Once() + s.mockMutableState.On("ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent", event.GetEventId(), event, mock.Anything).Return(rci, nil).Once() s.mockMetadataMgr.On("GetDomain", &persistence.GetDomainRequest{Name: targetDomain}).Return( &persistence.GetDomainResponse{ Info: &persistence.DomainInfo{ID: targetDomainID, Name: targetDomain}, diff --git a/service/history/timerQueueActiveProcessor.go b/service/history/timerQueueActiveProcessor.go index 4e1c5f82d0a..3c00f52f117 100644 --- a/service/history/timerQueueActiveProcessor.go +++ b/service/history/timerQueueActiveProcessor.go @@ -432,7 +432,7 @@ ExpireActivityTimers: if timeoutType != workflow.TimeoutTypeScheduleToStart { // ScheduleToStart (queue timeout) is not retriable. Instead of retry, customer should set larger // ScheduleToStart timeout. 
- ok, err := msBuilder.RetryActivity(ai, getTimeoutErrorReason(timeoutType)) + ok, err := msBuilder.RetryActivity(ai, getTimeoutErrorReason(timeoutType), nil) if err != nil { return err } diff --git a/service/history/transferQueueActiveProcessor.go b/service/history/transferQueueActiveProcessor.go index d74fc607a77..2fa89793760 100644 --- a/service/history/transferQueueActiveProcessor.go +++ b/service/history/transferQueueActiveProcessor.go @@ -23,7 +23,6 @@ package history import ( ctx "context" "fmt" - "github.com/pborman/uuid" h "github.com/uber/cadence/.gen/go/history" @@ -36,22 +35,24 @@ import ( "github.com/uber/cadence/common/log/tag" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/persistence" + "github.com/uber/cadence/service/worker/parentclosepolicy" ) const identityHistoryService = "history-service" type ( transferQueueActiveProcessorImpl struct { - currentClusterName string - shard ShardContext - historyService *historyEngineImpl - options *QueueProcessorOptions - historyClient history.Client - cache *historyCache - transferTaskFilter queueTaskFilter - logger log.Logger - metricsClient metrics.Client - maxReadAckLevel maxReadAckLevel + currentClusterName string + shard ShardContext + historyService *historyEngineImpl + options *QueueProcessorOptions + historyClient history.Client + cache *historyCache + transferTaskFilter queueTaskFilter + logger log.Logger + metricsClient metrics.Client + parentClosePolicyClient parentclosepolicy.Client + maxReadAckLevel maxReadAckLevel *transferQueueProcessorBase *queueProcessorBase queueAckMgr @@ -100,14 +101,22 @@ func newTransferQueueActiveProcessor( return nil } + parentClosePolicyClient := parentclosepolicy.NewClient( + shard.GetMetricsClient(), + shard.GetLogger(), + historyService.publicClient, + shard.GetConfig().NumParentClosePolicySystemWorkflows()) + processor := &transferQueueActiveProcessorImpl{ - currentClusterName: currentClusterName, - shard: shard, - historyService: 
historyService, - options: options, - historyClient: historyClient, - logger: logger, - metricsClient: historyService.metricsClient, + currentClusterName: currentClusterName, + shard: shard, + historyService: historyService, + options: options, + historyClient: historyClient, + logger: logger, + metricsClient: historyService.metricsClient, + parentClosePolicyClient: parentClosePolicyClient, + cache: historyService.historyCache, transferTaskFilter: transferTaskFilter, transferQueueProcessorBase: newTransferQueueProcessorBase( @@ -464,6 +473,8 @@ func (t *transferQueueActiveProcessorImpl) processCloseExecution( workflowExecutionTimestamp := getWorkflowExecutionTimestamp(msBuilder, startEvent) visibilityMemo := getWorkflowMemo(executionInfo.Memo) searchAttr := executionInfo.SearchAttributes + domainName := msBuilder.GetDomainName() + children := msBuilder.GetPendingChildExecutionInfos() // release the context lock since we no longer need mutable state builder and // the rest of logic is making RPC call, which takes time. 
@@ -509,9 +520,89 @@ func (t *transferQueueActiveProcessorImpl) processCloseExecution( err = nil } } + + if err != nil { + return err + } + + if len(children) > 0 { + err = t.processParentClosePolicy(domainName, domainID, children) + } return err } +func (t *transferQueueActiveProcessorImpl) processParentClosePolicy(domainName, domainUUID string, children map[int64]*persistence.ChildExecutionInfo) error { + scope := t.metricsClient.Scope(metrics.TransferActiveTaskCloseExecutionScope) + + if t.shard.GetConfig().EnableParentClosePolicyWorker() && len(children) >= t.shard.GetConfig().ParentClosePolicyThreshold(domainName) { + executions := make([]parentclosepolicy.RequestDetail, 0, len(children)) + for _, ch := range children { + if ch.ParentClosePolicy == workflow.ParentClosePolicyAbandon { + continue + } + + executions = append(executions, parentclosepolicy.RequestDetail{ + WorkflowID: ch.StartedWorkflowID, + RunID: ch.StartedRunID, + Policy: ch.ParentClosePolicy, + }) + } + + request := parentclosepolicy.Request{ + DomainName: domainName, + DomainUUID: domainUUID, + Executions: executions, + } + err := t.parentClosePolicyClient.SendParentClosePolicyRequest(request) + if err != nil { + return err + } + } else { + for _, child := range children { + var err error + switch child.ParentClosePolicy { + case workflow.ParentClosePolicyAbandon: + //no-op + continue + case workflow.ParentClosePolicyTerminate: + err = t.historyClient.TerminateWorkflowExecution(nil, &h.TerminateWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(domainUUID), + TerminateRequest: &workflow.TerminateWorkflowExecutionRequest{ + Domain: common.StringPtr(domainName), + WorkflowExecution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(child.StartedWorkflowID), + RunId: common.StringPtr(child.StartedRunID), + }, + Reason: common.StringPtr("by parent close policy"), + Identity: common.StringPtr(identityHistoryService), + }, + }) + case workflow.ParentClosePolicyRequestCancel: + err = 
t.historyClient.RequestCancelWorkflowExecution(nil, &h.RequestCancelWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(domainUUID), + CancelRequest: &workflow.RequestCancelWorkflowExecutionRequest{ + Domain: common.StringPtr(domainName), + WorkflowExecution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(child.StartedWorkflowID), + RunId: common.StringPtr(child.StartedRunID), + }, + Identity: common.StringPtr(identityHistoryService), + }, + }) + } + + if err != nil { + if _, ok := err.(*workflow.EntityNotExistsError); !ok { + scope.IncCounter(metrics.ParentClosePolicyProcessorFailures) + return err + } + } + scope.IncCounter(metrics.ParentClosePolicyProcessorSuccess) + } + } + return nil +} + func (t *transferQueueActiveProcessorImpl) processCancelExecution( task *persistence.TransferTaskInfo, ) (retError error) { diff --git a/service/history/transferQueueActiveProcessor_test.go b/service/history/transferQueueActiveProcessor_test.go index b2b47737cca..db3c9065052 100644 --- a/service/history/transferQueueActiveProcessor_test.go +++ b/service/history/transferQueueActiveProcessor_test.go @@ -51,35 +51,37 @@ import ( p "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/service" dc "github.com/uber/cadence/common/service/dynamicconfig" + "github.com/uber/cadence/service/worker/parentclosepolicy" ) type ( transferQueueActiveProcessorSuite struct { suite.Suite - controller *gomock.Controller - mockShardManager *mocks.ShardManager - mockHistoryEngine *historyEngineImpl - mockMetadataMgr *mocks.MetadataManager - mockVisibilityMgr *mocks.VisibilityManager - mockExecutionMgr *mocks.ExecutionManager - mockHistoryMgr *mocks.HistoryManager - mockHistoryV2Mgr *mocks.HistoryV2Manager - mockMatchingClient *matchingservicetest.MockClient - mockHistoryClient *historyservicetest.MockClient - mockShard ShardContext - mockClusterMetadata *mocks.ClusterMetadata - mockProducer *mocks.KafkaProducer - mockMessagingClient messaging.Client - 
mockQueueAckMgr *MockQueueAckMgr - mockClientBean *client.MockClientBean - mockService service.Service - logger log.Logger - mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *MockReplicatorQueueProcessor - mockTimerProcessor *MockTimerQueueProcessor - mockArchivalMetadata *archiver.MockArchivalMetadata - mockArchiverProvider *provider.MockArchiverProvider + controller *gomock.Controller + mockShardManager *mocks.ShardManager + mockHistoryEngine *historyEngineImpl + mockMetadataMgr *mocks.MetadataManager + mockVisibilityMgr *mocks.VisibilityManager + mockExecutionMgr *mocks.ExecutionManager + mockHistoryMgr *mocks.HistoryManager + mockHistoryV2Mgr *mocks.HistoryV2Manager + mockMatchingClient *matchingservicetest.MockClient + mockHistoryClient *historyservicetest.MockClient + mockShard ShardContext + mockClusterMetadata *mocks.ClusterMetadata + mockProducer *mocks.KafkaProducer + mockMessagingClient messaging.Client + mockQueueAckMgr *MockQueueAckMgr + mockClientBean *client.MockClientBean + mockService service.Service + logger log.Logger + mockTxProcessor *MockTransferQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor + mockTimerProcessor *MockTimerQueueProcessor + mockArchivalMetadata *archiver.MockArchivalMetadata + mockArchiverProvider *provider.MockArchiverProvider + mockParentClosePolicyClient *parentclosepolicy.ClientMock domainID string domainEntry *cache.DomainCacheEntry @@ -208,6 +210,9 @@ func (s *transferQueueActiveProcessorSuite) SetupTest() { s.transferQueueActiveProcessor.queueAckMgr = s.mockQueueAckMgr s.transferQueueActiveProcessor.queueProcessorBase.ackMgr = s.mockQueueAckMgr + s.mockParentClosePolicyClient = &parentclosepolicy.ClientMock{} + s.transferQueueActiveProcessor.parentClosePolicyClient = s.mockParentClosePolicyClient + s.domainID = validDomainID s.domainEntry = cache.NewLocalDomainCacheEntryForTest(&persistence.DomainInfo{ID: s.domainID}, &persistence.DomainConfig{}, "", nil) } @@ -757,6 +762,243 
@@ func (s *transferQueueActiveProcessorSuite) TestProcessCloseExecution_NoParent() s.Nil(err) } +func (s *transferQueueActiveProcessorSuite) TestProcessCloseExecution_NoParent_HasFewChildren() { + + execution := workflow.WorkflowExecution{ + WorkflowId: common.StringPtr("some random workflow ID"), + RunId: common.StringPtr(uuid.New()), + } + workflowType := "some random workflow type" + taskListName := "some random task list" + + msBuilder := newMutableStateBuilderWithReplicationStateWithEventV2(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := msBuilder.AddWorkflowExecutionStartedEvent( + s.domainEntry, + execution, + &history.StartWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(s.domainID), + StartRequest: &workflow.StartWorkflowExecutionRequest{ + WorkflowType: &workflow.WorkflowType{Name: common.StringPtr(workflowType)}, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(2), + TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), + }, + }, + ) + s.Nil(err) + + di := addDecisionTaskScheduledEvent(msBuilder) + event := addDecisionTaskStartedEvent(msBuilder, di.ScheduleID, taskListName, uuid.New()) + di.StartedID = event.GetEventId() + + dt := workflow.DecisionTypeStartChildWorkflowExecution + parentClosePolicy1 := workflow.ParentClosePolicyAbandon + parentClosePolicy2 := workflow.ParentClosePolicyTerminate + parentClosePolicy3 := workflow.ParentClosePolicyRequestCancel + + event, _ = msBuilder.AddDecisionTaskCompletedEvent(di.ScheduleID, di.StartedID, &workflow.RespondDecisionTaskCompletedRequest{ + ExecutionContext: nil, + Identity: common.StringPtr("some random identity"), + Decisions: []*workflow.Decision{ + &workflow.Decision{ + DecisionType: &dt, + StartChildWorkflowExecutionDecisionAttributes: &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow1"), + WorkflowType: 
&workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy1, + }, + }, + &workflow.Decision{ + DecisionType: &dt, + StartChildWorkflowExecutionDecisionAttributes: &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow2"), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy2, + }, + }, + &workflow.Decision{ + DecisionType: &dt, + StartChildWorkflowExecutionDecisionAttributes: &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow3"), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy3, + }, + }, + }, + }, defaultHistoryMaxAutoResetPoints) + + _, _, err = msBuilder.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow1"), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy1, + }) + s.Nil(err) + _, _, err = msBuilder.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow2"), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: 
common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy2, + }) + s.Nil(err) + _, _, err = msBuilder.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow3"), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy3, + }) + s.Nil(err) + + msBuilder.FlushBufferedEvents() + + taskID := int64(59) + event = addCompleteWorkflowEvent(msBuilder, event.GetEventId(), nil) + s.mockClusterMetadata.On("ClusterNameForFailoverVersion", s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()) + msBuilder.UpdateReplicationStateLastEventID(s.version, event.GetEventId()) + + transferTask := &persistence.TransferTaskInfo{ + Version: s.version, + DomainID: s.domainID, + WorkflowID: execution.GetWorkflowId(), + RunID: execution.GetRunId(), + TaskID: taskID, + TaskList: taskListName, + TaskType: persistence.TransferTaskTypeCloseExecution, + ScheduleID: event.GetEventId(), + } + + persistenceMutableState := createMutableState(msBuilder) + s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockVisibilityMgr.On("RecordWorkflowExecutionClosed", mock.Anything).Return(nil).Once() + s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) + mVisibilityArchiver := &archiver.VisibilityArchiverMock{} + mVisibilityArchiver.On("Archive", mock.Anything, mock.Anything, mock.Anything).Return(nil) + s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) + 
s.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(nil, gomock.Any()).Return(nil).Times(1) + s.mockHistoryClient.EXPECT().TerminateWorkflowExecution(nil, gomock.Any()).Return(nil).Times(1) + + _, err = s.transferQueueActiveProcessor.process(transferTask, true) + s.Nil(err) +} + +func (s *transferQueueActiveProcessorSuite) TestProcessCloseExecution_NoParent_HasManyChildren() { + + execution := workflow.WorkflowExecution{ + WorkflowId: common.StringPtr("some random workflow ID"), + RunId: common.StringPtr(uuid.New()), + } + workflowType := "some random workflow type" + taskListName := "some random task list" + + msBuilder := newMutableStateBuilderWithReplicationStateWithEventV2(s.mockShard, s.mockShard.GetEventsCache(), s.logger, s.version, execution.GetRunId()) + _, err := msBuilder.AddWorkflowExecutionStartedEvent( + s.domainEntry, + execution, + &history.StartWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(s.domainID), + StartRequest: &workflow.StartWorkflowExecutionRequest{ + WorkflowType: &workflow.WorkflowType{Name: common.StringPtr(workflowType)}, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(2), + TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), + }, + }, + ) + s.Nil(err) + + di := addDecisionTaskScheduledEvent(msBuilder) + event := addDecisionTaskStartedEvent(msBuilder, di.ScheduleID, taskListName, uuid.New()) + di.StartedID = event.GetEventId() + + dt := workflow.DecisionTypeStartChildWorkflowExecution + parentClosePolicy := workflow.ParentClosePolicyTerminate + decisions := []*workflow.Decision{} + for i := 0; i < 10; i++ { + decisions = append(decisions, &workflow.Decision{ + DecisionType: &dt, + StartChildWorkflowExecutionDecisionAttributes: &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow" + string(i)), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + 
TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy, + }, + }) + } + + event, _ = msBuilder.AddDecisionTaskCompletedEvent(di.ScheduleID, di.StartedID, &workflow.RespondDecisionTaskCompletedRequest{ + ExecutionContext: nil, + Identity: common.StringPtr("some random identity"), + Decisions: decisions, + }, defaultHistoryMaxAutoResetPoints) + + for i := 0; i < 10; i++ { + _, _, err = msBuilder.AddStartChildWorkflowExecutionInitiatedEvent(event.GetEventId(), uuid.New(), &workflow.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: common.StringPtr("child workflow" + string(i)), + WorkflowType: &workflow.WorkflowType{ + Name: common.StringPtr("child workflow type"), + }, + TaskList: &workflow.TaskList{Name: common.StringPtr(taskListName)}, + Input: []byte("random input"), + ParentClosePolicy: &parentClosePolicy, + }) + s.Nil(err) + } + + msBuilder.FlushBufferedEvents() + + taskID := int64(59) + event = addCompleteWorkflowEvent(msBuilder, event.GetEventId(), nil) + s.mockClusterMetadata.On("ClusterNameForFailoverVersion", s.version).Return(s.mockClusterMetadata.GetCurrentClusterName()) + msBuilder.UpdateReplicationStateLastEventID(s.version, event.GetEventId()) + + transferTask := &persistence.TransferTaskInfo{ + Version: s.version, + DomainID: s.domainID, + WorkflowID: execution.GetWorkflowId(), + RunID: execution.GetRunId(), + TaskID: taskID, + TaskList: taskListName, + TaskType: persistence.TransferTaskTypeCloseExecution, + ScheduleID: event.GetEventId(), + } + + persistenceMutableState := createMutableState(msBuilder) + s.mockExecutionMgr.On("GetWorkflowExecution", mock.Anything).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) + s.mockVisibilityMgr.On("RecordWorkflowExecutionClosed", mock.Anything).Return(nil).Once() + s.mockArchivalMetadata.On("GetVisibilityConfig").Return(archiver.NewArchivalConfig("enabled", 
dc.GetStringPropertyFn("enabled"), dc.GetBoolPropertyFn(true), "disabled", "random URI")) + mVisibilityArchiver := &archiver.VisibilityArchiverMock{} + mVisibilityArchiver.On("Archive", mock.Anything, mock.Anything, mock.Anything).Return(nil) + s.mockArchiverProvider.On("GetVisibilityArchiver", mock.Anything, mock.Anything).Return(mVisibilityArchiver, nil) + s.mockParentClosePolicyClient.On("SendParentClosePolicyRequest", mock.Anything).Return(nil).Times(1) + + _, err = s.transferQueueActiveProcessor.process(transferTask, true) + s.Nil(err) +} + func (s *transferQueueActiveProcessorSuite) TestProcessCancelExecution_Success() { execution := workflow.WorkflowExecution{ diff --git a/service/history/workflowExecutionContext.go b/service/history/workflowExecutionContext.go index 4588a2f4fbe..d64f1330cd2 100644 --- a/service/history/workflowExecutionContext.go +++ b/service/history/workflowExecutionContext.go @@ -763,7 +763,7 @@ func (c *workflowExecutionContextImpl) persistFirstWorkflowEvents( execution, &persistence.AppendHistoryNodesRequest{ IsNewBranch: true, - Info: historyGarbageCleanupInfo(domainID, workflowID, runID), + Info: persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, runID), BranchToken: branchToken, Events: events, // TransactionID is set by shard context diff --git a/service/history/workflowResetor.go b/service/history/workflowResetor.go index 2618c0810d1..7253604561a 100644 --- a/service/history/workflowResetor.go +++ b/service/history/workflowResetor.go @@ -360,7 +360,7 @@ func (w *workflowResetorImpl) buildNewMutableStateForReset( forkResp, retError := w.eng.historyV2Mgr.ForkHistoryBranch(&persistence.ForkHistoryBranchRequest{ ForkBranchToken: baseBranchToken, ForkNodeID: resetDecisionCompletedEventID, - Info: historyGarbageCleanupInfo(domainID, workflowID, newRunID), + Info: persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, newRunID), ShardID: common.IntPtr(w.eng.shard.GetShardID()), }) if retError != nil { @@ -400,10 
+400,6 @@ func (w *workflowResetorImpl) terminateIfCurrIsRunning( return } -func historyGarbageCleanupInfo(domainID, workflowID, runID string) string { - return fmt.Sprintf("%v:%v:%v", domainID, workflowID, runID) -} - func (w *workflowResetorImpl) setEventIDsWithHistory(msBuilder mutableState) (int64, error) { history := msBuilder.GetHistoryBuilder().GetHistory().Events firstEvent := history[0] @@ -850,7 +846,7 @@ func (w *workflowResetorImpl) ApplyResetEvent( forkResp, retError := w.eng.historyV2Mgr.ForkHistoryBranch(&persistence.ForkHistoryBranchRequest{ ForkBranchToken: baseBranchToken, ForkNodeID: decisionFinishEventID, - Info: historyGarbageCleanupInfo(domainID, workflowID, resetAttr.GetNewRunId()), + Info: persistence.BuildHistoryGarbageCleanupInfo(domainID, workflowID, resetAttr.GetNewRunId()), ShardID: shardID, }) if retError != nil { diff --git a/service/history/workflowResetor_test.go b/service/history/workflowResetor_test.go index edda68ffa61..4447197bbd4 100644 --- a/service/history/workflowResetor_test.go +++ b/service/history/workflowResetor_test.go @@ -4012,7 +4012,7 @@ func (s *resetorSuite) TestApplyReset() { forkReq := &p.ForkHistoryBranchRequest{ ForkBranchToken: forkBranchToken, ForkNodeID: 30, - Info: historyGarbageCleanupInfo(domainID, wid, newRunID), + Info: p.BuildHistoryGarbageCleanupInfo(domainID, wid, newRunID), ShardID: common.IntPtr(s.shardID), } forkResp := &p.ForkHistoryBranchResponse{ diff --git a/service/worker/archiver/client.go b/service/worker/archiver/client.go index cf8c04ced4e..c6fa4046d93 100644 --- a/service/worker/archiver/client.go +++ b/service/worker/archiver/client.go @@ -144,7 +144,7 @@ func (c *client) archiveInline(ctx context.Context, request *ClientRequest, tagg defer func() { if err != nil { c.metricsClient.IncCounter(metrics.ArchiverClientScope, metrics.ArchiverClientInlineArchiveFailureCount) - taggedLogger.Error("failed to perform workflow history archival inline", tag.Error(err)) + taggedLogger.Info("failed 
to perform workflow history archival inline", tag.Error(err)) } }() c.metricsClient.IncCounter(metrics.ArchiverClientScope, metrics.ArchiverClientInlineArchiveAttemptCount) diff --git a/service/worker/batcher/workflow.go b/service/worker/batcher/workflow.go index 9444de71169..44061acff01 100644 --- a/service/worker/batcher/workflow.go +++ b/service/worker/batcher/workflow.go @@ -429,6 +429,7 @@ func processTask( if err != nil { return err } + activity.RecordHeartbeat(ctx, task.hbd) err = procFn(wf.GetWorkflowId(), wf.GetRunId()) if err != nil { @@ -466,8 +467,6 @@ func processTask( }) } } - - activity.RecordHeartbeat(ctx, task.hbd) } return nil diff --git a/service/worker/parentclosepolicy/client.go b/service/worker/parentclosepolicy/client.go new file mode 100644 index 00000000000..1d242e9d0fa --- /dev/null +++ b/service/worker/parentclosepolicy/client.go @@ -0,0 +1,87 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package parentclosepolicy + +import ( + "context" + "fmt" + "math/rand" + "time" + + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/metrics" + "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" + cclient "go.uber.org/cadence/client" +) + +type ( + + // Client is used to send request to processor workflow + Client interface { + SendParentClosePolicyRequest(Request) error + } + + clientImpl struct { + metricsClient metrics.Client + logger log.Logger + cadenceClient cclient.Client + numWorkflows int + } +) + +var _ Client = (*clientImpl)(nil) + +const ( + signalTimeout = 400 * time.Millisecond + workflowIDPrefix = "parent-close-policy-workflow" +) + +// NewClient creates a new Client +func NewClient( + metricsClient metrics.Client, + logger log.Logger, + publicClient workflowserviceclient.Interface, + numWorkflows int, +) Client { + return &clientImpl{ + metricsClient: metricsClient, + logger: logger, + cadenceClient: cclient.NewClient(publicClient, common.SystemLocalDomainName, &cclient.Options{}), + numWorkflows: numWorkflows, + } +} + +func (c *clientImpl) SendParentClosePolicyRequest(request Request) error { + randomID := rand.Intn(c.numWorkflows) + workflowID := fmt.Sprintf("%v-%v", workflowIDPrefix, randomID) + workflowOptions := cclient.StartWorkflowOptions{ + ID: workflowID, + TaskList: processorTaskListName, + ExecutionStartToCloseTimeout: infiniteDuration, + DecisionTaskStartToCloseTimeout: time.Minute, + WorkflowIDReusePolicy: cclient.WorkflowIDReusePolicyAllowDuplicate, + } + signalCtx, cancel := context.WithTimeout(context.Background(), signalTimeout) + defer cancel() + _, err := 
c.cadenceClient.SignalWithStartWorkflow(signalCtx, workflowID, processorChannelName, request, workflowOptions, processorWFTypeName, nil) + return err +} diff --git a/service/worker/parentclosepolicy/client_mock.go b/service/worker/parentclosepolicy/client_mock.go new file mode 100644 index 00000000000..556a0093776 --- /dev/null +++ b/service/worker/parentclosepolicy/client_mock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package parentclosepolicy + +import ( + "github.com/stretchr/testify/mock" +) + +var _ Client = (*ClientMock)(nil) + +// ClientMock is an autogenerated mock type for the Client type +type ClientMock struct { + mock.Mock +} + +// SendParentClosePolicyRequest provides a mock function with given fields: _a0, _a1 +func (_m *ClientMock) SendParentClosePolicyRequest(_a0 Request) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(Request) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/service/worker/parentclosepolicy/processor.go b/service/worker/parentclosepolicy/processor.go new file mode 100644 index 00000000000..ffae56bf86e --- /dev/null +++ b/service/worker/parentclosepolicy/processor.go @@ -0,0 +1,84 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package parentclosepolicy + +import ( + "context" + + "github.com/opentracing/opentracing-go" + "github.com/uber-go/tally" + "github.com/uber/cadence/client" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" + "go.uber.org/cadence/worker" +) + +type ( + // BootstrapParams contains the set of params needed to bootstrap + // the sub-system + BootstrapParams struct { + // Config contains the configuration for scanner + // ServiceClient is an instance of cadence service client + ServiceClient workflowserviceclient.Interface + // MetricsClient is an instance of metrics object for emitting stats + MetricsClient metrics.Client + Logger log.Logger + // TallyScope is an instance of tally metrics scope + TallyScope tally.Scope + // ClientBean is an instance of client.Bean for a collection of clients + ClientBean client.Bean + } + + // Processor is the background sub-system that execute workflow for ParentClosePolicy + Processor struct { + svcClient workflowserviceclient.Interface + clientBean client.Bean + metricsClient metrics.Client + tallyScope tally.Scope + logger log.Logger + } +) + +// New returns a new instance as daemon +func New(params *BootstrapParams) *Processor { + return &Processor{ + svcClient: params.ServiceClient, + metricsClient: params.MetricsClient, + tallyScope: params.TallyScope, + logger: params.Logger.WithTags(tag.ComponentBatcher), + clientBean: params.ClientBean, + } +} + +// Start starts the scanner +func (s *Processor) Start() error { + ctx := context.WithValue(context.Background(), processorContextKey, s) + workerOpts := worker.Options{ + MetricsScope: s.tallyScope, + BackgroundActivityContext: ctx, + Tracer: opentracing.GlobalTracer(), + } + processorWorker := worker.New(s.svcClient, common.SystemLocalDomainName, processorTaskListName, workerOpts) + return 
processorWorker.Start() +} diff --git a/service/worker/parentclosepolicy/workflow.go b/service/worker/parentclosepolicy/workflow.go new file mode 100644 index 00000000000..52763e1450d --- /dev/null +++ b/service/worker/parentclosepolicy/workflow.go @@ -0,0 +1,162 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package parentclosepolicy + +import ( + "context" + "time" + + h "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + "go.uber.org/cadence" + "go.uber.org/cadence/activity" + "go.uber.org/cadence/workflow" +) + +const ( + processorContextKey = "processorContext" + // processorTaskListName is the tasklist name + processorTaskListName = "cadence-sys-processor-parent-close-policy" + // processorWFTypeName is the workflow type + processorWFTypeName = "cadence-sys-parent-close-policy-workflow" + processorActivityName = "cadence-sys-parent-close-policy-activity" + infiniteDuration = 20 * 365 * 24 * time.Hour + processorChannelName = "ParentClosePolicyProcessorChannelName" +) + +type ( + // RequestDetail defines detail of each workflow to process + RequestDetail struct { + WorkflowID string + RunID string + Policy shared.ParentClosePolicy + } + + // Request defines the request for parent close policy + Request struct { + Executions []RequestDetail + DomainName string + DomainUUID string + } +) + +var ( + retryPolicy = cadence.RetryPolicy{ + InitialInterval: 10 * time.Second, + BackoffCoefficient: 1.7, + MaximumInterval: 5 * time.Minute, + ExpirationInterval: infiniteDuration, + } + + activityOptions = workflow.ActivityOptions{ + ScheduleToStartTimeout: time.Minute, + StartToCloseTimeout: 5 * time.Minute, + RetryPolicy: &retryPolicy, + } +) + +func init() { + workflow.RegisterWithOptions(ProcessorWorkflow, workflow.RegisterOptions{Name: processorWFTypeName}) + activity.RegisterWithOptions(ProcessorActivity, activity.RegisterOptions{Name: processorActivityName}) +} + +// ProcessorWorkflow is the workflow that performs actions for ParentClosePolicy +func ProcessorWorkflow(ctx workflow.Context) error { + requestCh := workflow.GetSignalChannel(ctx, processorChannelName) + for { + var 
request Request + if !requestCh.ReceiveAsync(&request) { + // no more request + break + } + + opt := workflow.WithActivityOptions(ctx, activityOptions) + _ = workflow.ExecuteActivity(opt, processorActivityName, request).Get(ctx, nil) + } + return nil +} + +// ProcessorActivity is activity for processing batch operation +func ProcessorActivity(ctx context.Context, request Request) error { + processor := ctx.Value(processorContextKey).(*Processor) + client := processor.clientBean.GetHistoryClient() + for _, execution := range request.Executions { + var err error + switch execution.Policy { + case shared.ParentClosePolicyAbandon: + //no-op + continue + case shared.ParentClosePolicyTerminate: + err = client.TerminateWorkflowExecution(nil, &h.TerminateWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(request.DomainUUID), + TerminateRequest: &shared.TerminateWorkflowExecutionRequest{ + Domain: common.StringPtr(request.DomainName), + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr(execution.WorkflowID), + RunId: common.StringPtr(execution.RunID), + }, + Reason: common.StringPtr("by parent close policy"), + Identity: common.StringPtr(processorWFTypeName), + }, + }) + case shared.ParentClosePolicyRequestCancel: + err = client.RequestCancelWorkflowExecution(nil, &h.RequestCancelWorkflowExecutionRequest{ + DomainUUID: common.StringPtr(request.DomainUUID), + CancelRequest: &shared.RequestCancelWorkflowExecutionRequest{ + Domain: common.StringPtr(request.DomainName), + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr(execution.WorkflowID), + RunId: common.StringPtr(execution.RunID), + }, + Identity: common.StringPtr(processorWFTypeName), + }, + }) + } + + if err != nil { + if _, ok := err.(*shared.EntityNotExistsError); ok { + err = nil + } + } + + if err != nil { + processor.metricsClient.IncCounter(metrics.ParentClosePolicyProcessorScope, metrics.ParentClosePolicyProcessorFailures) + 
getActivityLogger(ctx).Error("failed to process parent close policy", tag.Error(err)) + return err + } + processor.metricsClient.IncCounter(metrics.ParentClosePolicyProcessorScope, metrics.ParentClosePolicyProcessorSuccess) + } + return nil +} + +func getActivityLogger(ctx context.Context) log.Logger { + processor := ctx.Value(processorContextKey).(*Processor) + wfInfo := activity.GetInfo(ctx) + return processor.logger.WithTags( + tag.WorkflowID(wfInfo.WorkflowExecution.ID), + tag.WorkflowRunID(wfInfo.WorkflowExecution.RunID), + tag.WorkflowDomainName(wfInfo.WorkflowDomain), + ) +} diff --git a/service/worker/replicator/replicationTask.go b/service/worker/replicator/replicationTask.go index 3d2c941a678..9b8efcea623 100644 --- a/service/worker/replicator/replicationTask.go +++ b/service/worker/replicator/replicationTask.go @@ -126,6 +126,7 @@ func newActivityReplicationTask(task *replicator.ReplicationTask, msg messaging. Attempt: attr.Attempt, LastFailureReason: attr.LastFailureReason, LastWorkerIdentity: attr.LastWorkerIdentity, + LastFailureDetails: attr.LastFailureDetails, }, } } diff --git a/service/worker/replicator/replicationTask_test.go b/service/worker/replicator/replicationTask_test.go index ff8b1562d85..425d7044966 100644 --- a/service/worker/replicator/replicationTask_test.go +++ b/service/worker/replicator/replicationTask_test.go @@ -26,16 +26,14 @@ import ( "time" "github.com/golang/mock/gomock" - - "github.com/uber/cadence/.gen/go/history/historyservicetest" - "github.com/uber/cadence/common/clock" - "github.com/stretchr/testify/suite" "github.com/uber-go/tally" h "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/history/historyservicetest" "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" + "github.com/uber/cadence/common/clock" "github.com/uber/cadence/common/cluster" "github.com/uber/cadence/common/definition" "github.com/uber/cadence/common/log" @@ 
-245,6 +243,7 @@ func (s *activityReplicationTaskSuite) TestNewActivityReplicationTask() { Attempt: replicationAttr.Attempt, LastFailureReason: replicationAttr.LastFailureReason, LastWorkerIdentity: replicationAttr.LastWorkerIdentity, + LastFailureDetails: replicationAttr.LastFailureDetails, }, }, task, @@ -668,6 +667,7 @@ func (s *activityReplicationTaskSuite) getActivityReplicationTask() *replicator. Attempt: common.Int32Ptr(59), LastFailureReason: common.StringPtr("some random failure reason"), LastWorkerIdentity: common.StringPtr("some random worker identity"), + LastFailureDetails: []byte("some random failure details"), } replicationTask := &replicator.ReplicationTask{ TaskType: replicator.ReplicationTaskTypeSyncActivity.Ptr(), diff --git a/service/worker/scanner/history/scavenger.go b/service/worker/scanner/history/scavenger.go new file mode 100644 index 00000000000..4ea60d1ee3f --- /dev/null +++ b/service/worker/scanner/history/scavenger.go @@ -0,0 +1,307 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "time" + + "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/history/historyserviceclient" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + p "github.com/uber/cadence/common/persistence" + "go.uber.org/cadence/activity" + "golang.org/x/time/rate" +) + +type ( + // ScavengerHeartbeatDetails is the heartbeat detail for HistoryScavengerActivity + ScavengerHeartbeatDetails struct { + NextPageToken []byte + CurrentPage int + SkipCount int + ErrorCount int + SuccCount int + } + + // Scavenger is the type that holds the state for history scavenger daemon + Scavenger struct { + db p.HistoryV2Manager + client historyserviceclient.Interface + hbd ScavengerHeartbeatDetails + rps int + limiter *rate.Limiter + metrics metrics.Client + logger log.Logger + isInTest bool + } + + taskDetail struct { + domainID string + workflowID string + runID string + treeID string + branchID string + + // passing along the current heartbeat details to make heartbeat within a task so that it won't timeout + hbd ScavengerHeartbeatDetails + } +) + +const ( + // used this to decide how many goroutines to process + rpsPerConcurrency = 50 + pageSize = 1000 + // only clean up history branches that older than this threshold + cleanUpThreshold = time.Hour * 24 +) + +// NewScavenger returns an instance of history scavenger daemon +// The Scavenger can be started by calling the Run() method on the +// returned object. 
Calling the Run() method will result in one +// complete iteration over all of the history branches in the system. For +// each branch, the scavenger will attempt +// - describe the corresponding workflow execution +// - deletion of history itself, if there are no workflow execution +func NewScavenger( + db p.HistoryV2Manager, + rps int, + client historyserviceclient.Interface, + hbd ScavengerHeartbeatDetails, + metricsClient metrics.Client, + logger log.Logger, +) *Scavenger { + + rateLimiter := rate.NewLimiter(rate.Limit(rps), rps) + + return &Scavenger{ + db: db, + client: client, + hbd: hbd, + rps: rps, + limiter: rateLimiter, + metrics: metricsClient, + logger: logger, + } +} + +// Run runs the scavenger +func (s *Scavenger) Run(ctx context.Context) (ScavengerHeartbeatDetails, error) { + taskCh := make(chan taskDetail, pageSize) + respCh := make(chan error, pageSize) + concurrency := s.rps/rpsPerConcurrency + 1 + + for i := 0; i < concurrency; i++ { + go s.startTaskProcessor(ctx, taskCh, respCh) + } + + for { + resp, err := s.db.GetAllHistoryTreeBranches(&p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: s.hbd.NextPageToken, + }) + if err != nil { + return s.hbd, err + } + batchCount := len(resp.Branches) + + skips := 0 + errorsOnSplitting := 0 + // send all tasks + for _, br := range resp.Branches { + if time.Now().Add(-cleanUpThreshold).Before(br.ForkTime) { + batchCount-- + skips++ + s.metrics.IncCounter(metrics.HistoryScavengerScope, metrics.HistoryScavengerSkipCount) + continue + } + + domainID, wid, rid, err := p.SplitHistoryGarbageCleanupInfo(br.Info) + if err != nil { + batchCount-- + errorsOnSplitting++ + s.logger.Error("unable to parse the history cleanup info", tag.DetailInfo(br.Info)) + s.metrics.IncCounter(metrics.HistoryScavengerScope, metrics.HistoryScavengerErrorCount) + continue + } + + taskCh <- taskDetail{ + domainID: domainID, + workflowID: wid, + runID: rid, + treeID: br.TreeID, + branchID: br.BranchID, + + hbd: 
s.hbd, + } + } + + succCount := 0 + errCount := 0 + if batchCount > 0 { + // wait for counters indicate this batch is done + Loop: + for { + select { + case err := <-respCh: + if err == nil { + s.metrics.IncCounter(metrics.HistoryScavengerScope, metrics.HistoryScavengerSuccessCount) + succCount++ + } else { + s.metrics.IncCounter(metrics.HistoryScavengerScope, metrics.HistoryScavengerErrorCount) + errCount++ + } + if succCount+errCount == batchCount { + break Loop + } + case <-ctx.Done(): + return s.hbd, ctx.Err() + } + } + } + + s.hbd.CurrentPage++ + s.hbd.NextPageToken = resp.NextPageToken + s.hbd.SuccCount += succCount + s.hbd.ErrorCount += errCount + errorsOnSplitting + s.hbd.SkipCount += skips + if !s.isInTest { + activity.RecordHeartbeat(ctx, s.hbd) + } + + if len(s.hbd.NextPageToken) == 0 { + break + } + } + return s.hbd, nil +} + +func (s *Scavenger) startTaskProcessor( + ctx context.Context, + taskCh chan taskDetail, + respCh chan error, +) { + for { + select { + case <-ctx.Done(): + return + case task := <-taskCh: + if isDone(ctx) { + return + } + + if !s.isInTest { + activity.RecordHeartbeat(ctx, s.hbd) + } + + err := s.limiter.Wait(ctx) + if err != nil { + respCh <- err + s.logger.Error("encounter error when wait for rate limiter", + getTaskLoggingTags(err, task)...) 
+ continue + } + + // this checks if the mutableState still exists + // if not then the history branch is garbage, we need to delete the history branch + _, err = s.client.DescribeMutableState(ctx, &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr(task.domainID), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr(task.workflowID), + RunId: common.StringPtr(task.runID), + }, + }) + + if err != nil { + if _, ok := err.(*shared.EntityNotExistsError); ok { + //deleting history branch + var branchToken []byte + branchToken, err = p.NewHistoryBranchTokenByBranchID(task.treeID, task.branchID) + if err != nil { + respCh <- err + s.logger.Error("encounter error when creating branch token", + getTaskLoggingTags(err, task)...) + continue + } + + err = s.db.DeleteHistoryBranch(&p.DeleteHistoryBranchRequest{ + BranchToken: branchToken, + // This is a required argument but it is not needed for Cassandra. + // Since this scanner is only for Cassandra, + // we can fill any number here to let to code go through + ShardID: common.IntPtr(1), + }) + if err != nil { + respCh <- err + s.logger.Error("encounter error when deleting garbage history branch", + getTaskLoggingTags(err, task)...) + } else { + // deleted garbage + s.logger.Info("deleted history garbage", + getTaskLoggingTags(nil, task)...) + + respCh <- nil + } + } else { + s.logger.Error("encounter error when describing the mutable state", + getTaskLoggingTags(err, task)...) 
+ respCh <- err + } + } else { + // no garbage + respCh <- nil + } + } + } +} + +func getTaskLoggingTags(err error, task taskDetail) []tag.Tag { + if err != nil { + return []tag.Tag{ + tag.Error(err), + tag.WorkflowDomainID(task.domainID), + tag.WorkflowID(task.workflowID), + tag.WorkflowRunID(task.runID), + tag.WorkflowTreeID(task.treeID), + tag.WorkflowBranchID(task.branchID), + } + } + return []tag.Tag{ + tag.WorkflowDomainID(task.domainID), + tag.WorkflowID(task.workflowID), + tag.WorkflowRunID(task.runID), + tag.WorkflowTreeID(task.treeID), + tag.WorkflowBranchID(task.branchID), + } +} + +func isDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} diff --git a/service/worker/scanner/history/scavenger_test.go b/service/worker/scanner/history/scavenger_test.go new file mode 100644 index 00000000000..56449028dfd --- /dev/null +++ b/service/worker/scanner/history/scavenger_test.go @@ -0,0 +1,461 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + "github.com/uber-go/tally" + "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/history/historyservicetest" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/loggerimpl" + "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/mocks" + p "github.com/uber/cadence/common/persistence" + "go.uber.org/zap" +) + +type ( + ScavengerTestSuite struct { + suite.Suite + logger log.Logger + metric metrics.Client + } +) + +func TestScavengerTestSuite(t *testing.T) { + suite.Run(t, new(ScavengerTestSuite)) +} + +func (s *ScavengerTestSuite) SetupTest() { + zapLogger, err := zap.NewDevelopment() + if err != nil { + s.Require().NoError(err) + } + s.logger = loggerimpl.NewLogger(zapLogger) + s.metric = metrics.NewClient(tally.NoopScope, metrics.Worker) +} + +func (s *ScavengerTestSuite) createTestScavenger(rps int) (*mocks.HistoryV2Manager, *historyservicetest.MockClient, *Scavenger, *gomock.Controller) { + db := &mocks.HistoryV2Manager{} + controller := gomock.NewController(s.T()) + workflowClient := historyservicetest.NewMockClient(controller) + scvgr := NewScavenger(db, 100, workflowClient, ScavengerHeartbeatDetails{}, s.metric, s.logger) + scvgr.isInTest = true + return db, workflowClient, scvgr, controller +} + +func (s *ScavengerTestSuite) TestAllSkipTasksTwoPages() { + db, _, scvgr, controller := s.createTestScavenger(100) + defer controller.Finish() + db.On("GetAllHistoryTreeBranches", 
&p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + NextPageToken: []byte("page1"), + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID1", + BranchID: "branchID1", + ForkTime: time.Now(), + Info: p.BuildHistoryGarbageCleanupInfo("domainID1", "workflowID1", "runID1"), + }, + { + TreeID: "treeID2", + BranchID: "branchID2", + ForkTime: time.Now(), + Info: p.BuildHistoryGarbageCleanupInfo("domainID2", "workflowID2", "runID2"), + }, + }, + }, nil).Once() + + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: []byte("page1"), + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID3", + BranchID: "branchID3", + ForkTime: time.Now(), + Info: p.BuildHistoryGarbageCleanupInfo("domainID3", "workflowID3", "runID3"), + }, + { + TreeID: "treeID4", + BranchID: "branchID4", + ForkTime: time.Now(), + Info: p.BuildHistoryGarbageCleanupInfo("domainID4", "workflowID4", "runID4"), + }, + }, + }, nil).Once() + + hbd, err := scvgr.Run(context.Background()) + s.Nil(err) + s.Equal(4, hbd.SkipCount) + s.Equal(0, hbd.SuccCount) + s.Equal(0, hbd.ErrorCount) + s.Equal(2, hbd.CurrentPage) + s.Equal(0, len(hbd.NextPageToken)) +} + +func (s *ScavengerTestSuite) TestAllErrorSplittingTasksTwoPages() { + db, _, scvgr, controller := s.createTestScavenger(100) + defer controller.Finish() + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + NextPageToken: []byte("page1"), + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID1", + BranchID: "branchID1", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: "error-info", + }, + { + TreeID: "treeID2", + BranchID: "branchID2", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: "error-info", + }, + }, + }, nil).Once() + + db.On("GetAllHistoryTreeBranches", 
&p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: []byte("page1"), + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID3", + BranchID: "branchID3", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: "error-info", + }, + { + TreeID: "treeID4", + BranchID: "branchID4", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: "error-info", + }, + }, + }, nil).Once() + + hbd, err := scvgr.Run(context.Background()) + s.Nil(err) + s.Equal(0, hbd.SkipCount) + s.Equal(0, hbd.SuccCount) + s.Equal(4, hbd.ErrorCount) + s.Equal(2, hbd.CurrentPage) + s.Equal(0, len(hbd.NextPageToken)) +} + +func (s *ScavengerTestSuite) TestNoGarbageTwoPages() { + db, client, scvgr, controller := s.createTestScavenger(100) + defer controller.Finish() + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + NextPageToken: []byte("page1"), + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID1", + BranchID: "branchID1", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID1", "workflowID1", "runID1"), + }, + { + TreeID: "treeID2", + BranchID: "branchID2", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID2", "workflowID2", "runID2"), + }, + }, + }, nil).Once() + + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: []byte("page1"), + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID3", + BranchID: "branchID3", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID3", "workflowID3", "runID3"), + }, + { + TreeID: "treeID4", + BranchID: "branchID4", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID4", 
"workflowID4", "runID4"), + }, + }, + }, nil).Once() + + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID1"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID1"), + RunId: common.StringPtr("runID1"), + }, + }).Return(nil, nil) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID2"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID2"), + RunId: common.StringPtr("runID2"), + }, + }).Return(nil, nil) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID3"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID3"), + RunId: common.StringPtr("runID3"), + }, + }).Return(nil, nil) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID4"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID4"), + RunId: common.StringPtr("runID4"), + }, + }).Return(nil, nil) + + hbd, err := scvgr.Run(context.Background()) + s.Nil(err) + s.Equal(0, hbd.SkipCount) + s.Equal(4, hbd.SuccCount) + s.Equal(0, hbd.ErrorCount) + s.Equal(2, hbd.CurrentPage) + s.Equal(0, len(hbd.NextPageToken)) +} + +func (s *ScavengerTestSuite) TestDeletingBranchesTwoPages() { + db, client, scvgr, controller := s.createTestScavenger(100) + defer controller.Finish() + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + NextPageToken: []byte("page1"), + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID1", + BranchID: "branchID1", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID1", "workflowID1", "runID1"), + }, + { + TreeID: 
"treeID2", + BranchID: "branchID2", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID2", "workflowID2", "runID2"), + }, + }, + }, nil).Once() + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: []byte("page1"), + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + Branches: []p.HistoryBranchDetail{ + { + TreeID: "treeID3", + BranchID: "branchID3", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID3", "workflowID3", "runID3"), + }, + { + TreeID: "treeID4", + BranchID: "branchID4", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID4", "workflowID4", "runID4"), + }, + }, + }, nil).Once() + + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID1"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID1"), + RunId: common.StringPtr("runID1"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID2"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID2"), + RunId: common.StringPtr("runID2"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID3"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID3"), + RunId: common.StringPtr("runID3"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID4"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID4"), + RunId: 
common.StringPtr("runID4"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + + branchToken1, err := p.NewHistoryBranchTokenByBranchID("treeID1", "branchID1") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken1, + ShardID: common.IntPtr(1), + }).Return(nil).Once() + branchToken2, err := p.NewHistoryBranchTokenByBranchID("treeID2", "branchID2") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken2, + ShardID: common.IntPtr(1), + }).Return(nil).Once() + branchToken3, err := p.NewHistoryBranchTokenByBranchID("treeID3", "branchID3") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken3, + ShardID: common.IntPtr(1), + }).Return(nil).Once() + branchToken4, err := p.NewHistoryBranchTokenByBranchID("treeID4", "branchID4") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken4, + ShardID: common.IntPtr(1), + }).Return(nil).Once() + + hbd, err := scvgr.Run(context.Background()) + s.Nil(err) + s.Equal(0, hbd.SkipCount) + s.Equal(4, hbd.SuccCount) + s.Equal(0, hbd.ErrorCount) + s.Equal(2, hbd.CurrentPage) + s.Equal(0, len(hbd.NextPageToken)) +} + +func (s *ScavengerTestSuite) TestMixesTwoPages() { + db, client, scvgr, controller := s.createTestScavenger(100) + defer controller.Finish() + db.On("GetAllHistoryTreeBranches", &p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + NextPageToken: []byte("page1"), + Branches: []p.HistoryBranchDetail{ + { + //skip + TreeID: "treeID1", + BranchID: "branchID1", + ForkTime: time.Now(), + Info: p.BuildHistoryGarbageCleanupInfo("domainID1", "workflowID1", "runID1"), + }, + { + // split error + TreeID: "treeID2", + BranchID: "branchID2", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: "error-info", + }, + }, + }, nil).Once() + db.On("GetAllHistoryTreeBranches", 
&p.GetAllHistoryTreeBranchesRequest{ + PageSize: pageSize, + NextPageToken: []byte("page1"), + }).Return(&p.GetAllHistoryTreeBranchesResponse{ + Branches: []p.HistoryBranchDetail{ + { + //delete succ + TreeID: "treeID3", + BranchID: "branchID3", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID3", "workflowID3", "runID3"), + }, + { + // delete fail + TreeID: "treeID4", + BranchID: "branchID4", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID4", "workflowID4", "runID4"), + }, + { + //not delete + TreeID: "treeID5", + BranchID: "branchID5", + ForkTime: time.Now().Add(-cleanUpThreshold * 2), + Info: p.BuildHistoryGarbageCleanupInfo("domainID5", "workflowID5", "runID5"), + }, + }, + }, nil).Once() + + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID3"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID3"), + RunId: common.StringPtr("runID3"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID4"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID4"), + RunId: common.StringPtr("runID4"), + }, + }).Return(nil, &shared.EntityNotExistsError{}) + client.EXPECT().DescribeMutableState(gomock.Any(), &history.DescribeMutableStateRequest{ + DomainUUID: common.StringPtr("domainID5"), + Execution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr("workflowID5"), + RunId: common.StringPtr("runID5"), + }, + }).Return(nil, nil) + + branchToken3, err := p.NewHistoryBranchTokenByBranchID("treeID3", "branchID3") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken3, + ShardID: common.IntPtr(1), + }).Return(nil).Once() + + branchToken4, err := 
p.NewHistoryBranchTokenByBranchID("treeID4", "branchID4") + s.Nil(err) + db.On("DeleteHistoryBranch", &p.DeleteHistoryBranchRequest{ + BranchToken: branchToken4, + ShardID: common.IntPtr(1), + }).Return(fmt.Errorf("failed to delete history")).Once() + + hbd, err := scvgr.Run(context.Background()) + s.Nil(err) + s.Equal(1, hbd.SkipCount) + s.Equal(2, hbd.SuccCount) + s.Equal(2, hbd.ErrorCount) + s.Equal(2, hbd.CurrentPage) + s.Equal(0, len(hbd.NextPageToken)) +} diff --git a/service/worker/scanner/scanner.go b/service/worker/scanner/scanner.go index 901f0eea13f..c00e48d521f 100644 --- a/service/worker/scanner/scanner.go +++ b/service/worker/scanner/scanner.go @@ -25,6 +25,7 @@ import ( "time" "github.com/uber-go/tally" + "github.com/uber/cadence/client" "github.com/uber/cadence/common" "github.com/uber/cadence/common/backoff" "github.com/uber/cadence/common/cluster" @@ -60,6 +61,8 @@ type ( Config Config // SDKClient is an instance of cadence sdk client SDKClient workflowserviceclient.Interface + // clientBean is an instance of ClientBean + ClientBean client.Bean // MetricsClient is an instance of metrics object for emitting stats MetricsClient metrics.Client Logger log.Logger @@ -72,8 +75,10 @@ type ( scannerContext struct { taskDB p.TaskManager domainDB p.MetadataManager + historyDB p.HistoryV2Manager cfg Config sdkClient workflowserviceclient.Interface + clientBean client.Bean metricsClient metrics.Client tallyScope tally.Scope logger log.Logger @@ -104,6 +109,7 @@ func New(params *BootstrapParams) *Scanner { context: scannerContext{ cfg: cfg, sdkClient: params.SDKClient, + clientBean: params.ClientBean, metricsClient: params.MetricsClient, tallyScope: params.TallyScope, zapLogger: zapLogger, @@ -124,35 +130,41 @@ func (s *Scanner) Start() error { MaxConcurrentDecisionTaskExecutionSize: maxConcurrentDecisionTaskExecutionSize, BackgroundActivityContext: context.WithValue(context.Background(), scannerContextKey, s.context), } - go s.startWorkflowWithRetry() + + if 
s.context.cfg.Persistence.DefaultStoreType() == config.StoreTypeSQL { + go s.startWorkflowWithRetry(tlScannerWFStartOptions, tlScannerWFTypeName) + } else if s.context.cfg.Persistence.DefaultStoreType() == config.StoreTypeCassandra { + go s.startWorkflowWithRetry(historyScannerWFStartOptions, historyScannerWFTypeName) + } + worker := worker.New(s.context.sdkClient, common.SystemLocalDomainName, tlScannerTaskListName, workerOpts) return worker.Start() } -func (s *Scanner) startWorkflowWithRetry() error { +func (s *Scanner) startWorkflowWithRetry(options cclient.StartWorkflowOptions, wfType string) error { client := cclient.NewClient(s.context.sdkClient, common.SystemLocalDomainName, &cclient.Options{}) policy := backoff.NewExponentialRetryPolicy(time.Second) policy.SetMaximumInterval(time.Minute) policy.SetExpirationInterval(backoff.NoInterval) return backoff.Retry(func() error { - return s.startWorkflow(client) + return s.startWorkflow(client, options, wfType) }, policy, func(err error) bool { return true }) } -func (s *Scanner) startWorkflow(client cclient.Client) error { +func (s *Scanner) startWorkflow(client cclient.Client, options cclient.StartWorkflowOptions, wfType string) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - _, err := client.StartWorkflow(ctx, tlScannerWFStartOptions, tlScannerWFTypeName) + _, err := client.StartWorkflow(ctx, options, wfType) cancel() if err != nil { if _, ok := err.(*shared.WorkflowExecutionAlreadyStartedError); ok { return nil } - s.context.logger.Error("error starting scanner workflow", tag.Error(err)) + s.context.logger.Error("error starting "+wfType+" workflow", tag.Error(err)) return err } - s.context.logger.Info("Scanner workflow successfully started") + s.context.logger.Info(wfType + " workflow successfully started") return nil } @@ -167,7 +179,12 @@ func (s *Scanner) buildContext() error { if err != nil { return err } + historyDB, err := pFactory.NewHistoryV2Manager() + if err != nil { + 
return err + } s.context.taskDB = taskDB s.context.domainDB = domainDB + s.context.historyDB = historyDB return nil } diff --git a/service/worker/scanner/workflow.go b/service/worker/scanner/workflow.go index c5012e59120..1a328ae6099 100644 --- a/service/worker/scanner/workflow.go +++ b/service/worker/scanner/workflow.go @@ -25,6 +25,7 @@ import ( "time" "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/service/worker/scanner/history" "github.com/uber/cadence/service/worker/scanner/tasklist" "go.uber.org/cadence" "go.uber.org/cadence/activity" @@ -32,7 +33,9 @@ import ( "go.uber.org/cadence/workflow" ) -type contextKey int +type ( + contextKey int +) const ( scannerContextKey = contextKey(0) @@ -45,17 +48,28 @@ const ( tlScannerWFTypeName = "cadence-sys-tl-scanner-workflow" tlScannerTaskListName = "cadence-sys-tl-scanner-tasklist-0" taskListScavengerActivityName = "cadence-sys-tl-scanner-scvg-activity" + + historyScannerWFID = "cadence-sys-history-scanner" + historyScannerWFTypeName = "cadence-sys-history-scanner-workflow" + historyScannerTaskListName = "cadence-sys-history-scanner-tasklist-0" + historyScavengerActivityName = "cadence-sys-history-scanner-scvg-activity" ) var ( tlScavengerHBInterval = 10 * time.Second - tlScavengerActivityRetryPolicy = cadence.RetryPolicy{ + activityRetryPolicy = cadence.RetryPolicy{ InitialInterval: 10 * time.Second, BackoffCoefficient: 1.7, MaximumInterval: 5 * time.Minute, ExpirationInterval: infiniteDuration, } + activityOptions = workflow.ActivityOptions{ + ScheduleToStartTimeout: 5 * time.Minute, + StartToCloseTimeout: infiniteDuration, + HeartbeatTimeout: 5 * time.Minute, + RetryPolicy: &activityRetryPolicy, + } tlScannerWFStartOptions = cclient.StartWorkflowOptions{ ID: tlScannerWFID, TaskList: tlScannerTaskListName, @@ -63,25 +77,50 @@ var ( WorkflowIDReusePolicy: cclient.WorkflowIDReusePolicyAllowDuplicate, CronSchedule: "0 */12 * * *", } + historyScannerWFStartOptions = cclient.StartWorkflowOptions{ + 
ID: historyScannerWFID, + TaskList: historyScannerTaskListName, + ExecutionStartToCloseTimeout: infiniteDuration, + WorkflowIDReusePolicy: cclient.WorkflowIDReusePolicyAllowDuplicate, + } ) func init() { workflow.RegisterWithOptions(TaskListScannerWorkflow, workflow.RegisterOptions{Name: tlScannerWFTypeName}) + workflow.RegisterWithOptions(HistoryScannerWorkflow, workflow.RegisterOptions{Name: historyScannerWFTypeName}) activity.RegisterWithOptions(TaskListScavengerActivity, activity.RegisterOptions{Name: taskListScavengerActivityName}) + activity.RegisterWithOptions(HistoryScavengerActivity, activity.RegisterOptions{Name: historyScavengerActivityName}) } // TaskListScannerWorkflow is the workflow that runs the task-list scanner background daemon func TaskListScannerWorkflow(ctx workflow.Context) error { - opts := workflow.ActivityOptions{ - ScheduleToStartTimeout: 5 * time.Minute, - StartToCloseTimeout: infiniteDuration, - HeartbeatTimeout: 5 * time.Minute, - RetryPolicy: &tlScavengerActivityRetryPolicy, - } - future := workflow.ExecuteActivity(workflow.WithActivityOptions(ctx, opts), taskListScavengerActivityName) + + future := workflow.ExecuteActivity(workflow.WithActivityOptions(ctx, activityOptions), taskListScavengerActivityName) + return future.Get(ctx, nil) +} + +// HistoryScannerWorkflow is the workflow that runs the history scanner background daemon +func HistoryScannerWorkflow(ctx workflow.Context) error { + future := workflow.ExecuteActivity(workflow.WithActivityOptions(ctx, activityOptions), historyScavengerActivityName) return future.Get(ctx, nil) } +// HistoryScavengerActivity is the activity that runs history scavenger +func HistoryScavengerActivity(aCtx context.Context) (history.ScavengerHeartbeatDetails, error) { + ctx := aCtx.Value(scannerContextKey).(scannerContext) + rps := ctx.cfg.PersistenceMaxQPS() + + hbd := history.ScavengerHeartbeatDetails{} + if activity.HasHeartbeatDetails(aCtx) { + if err := activity.GetHeartbeatDetails(aCtx, &hbd); 
err != nil { + ctx.logger.Error("Failed to recover from last heartbeat, start over from beginning", tag.Error(err)) + } + } + + scavenger := history.NewScavenger(ctx.historyDB, rps, ctx.clientBean.GetHistoryClient(), hbd, ctx.metricsClient, ctx.logger) + return scavenger.Run(aCtx) +} + // TaskListScavengerActivity is the activity that runs task list scavenger func TaskListScavengerActivity(aCtx context.Context) error { ctx := aCtx.Value(scannerContextKey).(scannerContext) diff --git a/service/worker/service.go b/service/worker/service.go index 48ad7312c55..776403b8244 100644 --- a/service/worker/service.go +++ b/service/worker/service.go @@ -36,11 +36,11 @@ import ( "github.com/uber/cadence/common/persistence" persistencefactory "github.com/uber/cadence/common/persistence/persistence-factory" "github.com/uber/cadence/common/service" - "github.com/uber/cadence/common/service/config" "github.com/uber/cadence/common/service/dynamicconfig" "github.com/uber/cadence/service/worker/archiver" "github.com/uber/cadence/service/worker/batcher" "github.com/uber/cadence/service/worker/indexer" + "github.com/uber/cadence/service/worker/parentclosepolicy" "github.com/uber/cadence/service/worker/replicator" "github.com/uber/cadence/service/worker/scanner" ) @@ -61,13 +61,14 @@ type ( // Config contains all the service config for worker Config struct { - ReplicationCfg *replicator.Config - ArchiverConfig *archiver.Config - IndexerCfg *indexer.Config - ScannerCfg *scanner.Config - BatcherCfg *batcher.Config - ThrottledLogRPS dynamicconfig.IntPropertyFn - EnableBatcher dynamicconfig.BoolPropertyFn + ReplicationCfg *replicator.Config + ArchiverConfig *archiver.Config + IndexerCfg *indexer.Config + ScannerCfg *scanner.Config + BatcherCfg *batcher.Config + ThrottledLogRPS dynamicconfig.IntPropertyFn + EnableBatcher dynamicconfig.BoolPropertyFn + EnableParentClosePolicyWorker dynamicconfig.BoolPropertyFn } ) @@ -113,8 +114,9 @@ func NewConfig(params *service.BootstrapParams) *Config { 
AdminOperationToken: dc.GetStringProperty(dynamicconfig.AdminOperationToken, common.DefaultAdminOperationToken), ClusterMetadata: params.ClusterMetadata, }, - EnableBatcher: dc.GetBoolProperty(dynamicconfig.EnableBatcher, false), - ThrottledLogRPS: dc.GetIntProperty(dynamicconfig.WorkerThrottledLogRPS, 20), + EnableBatcher: dc.GetBoolProperty(dynamicconfig.EnableBatcher, false), + EnableParentClosePolicyWorker: dc.GetBoolProperty(dynamicconfig.EnableParentClosePolicyWorker, true), + ThrottledLogRPS: dc.GetIntProperty(dynamicconfig.WorkerThrottledLogRPS, 20), } advancedVisWritingMode := dc.GetStringProperty( dynamicconfig.AdvancedVisibilityWritingMode, @@ -147,29 +149,26 @@ func (s *Service) Start() { replicatorEnabled := base.GetClusterMetadata().IsGlobalDomainEnabled() archiverEnabled := base.GetArchivalMetadata().GetHistoryConfig().ClusterConfiguredForArchival() - scannerEnabled := s.config.ScannerCfg.Persistence.DefaultStoreType() == config.StoreTypeSQL batcherEnabled := s.config.EnableBatcher() + parentClosePolicyEnabled := s.config.EnableParentClosePolicyWorker() - if replicatorEnabled || archiverEnabled || scannerEnabled || batcherEnabled { - pConfig := s.params.PersistenceConfig - pConfig.SetMaxQPS(pConfig.DefaultStore, s.config.ReplicationCfg.PersistenceMaxQPS()) - pFactory := persistencefactory.New(&pConfig, s.params.ClusterMetadata.GetCurrentClusterName(), s.metricsClient, s.logger) + pConfig := s.params.PersistenceConfig + pConfig.SetMaxQPS(pConfig.DefaultStore, s.config.ReplicationCfg.PersistenceMaxQPS()) + pFactory := persistencefactory.New(&pConfig, s.params.ClusterMetadata.GetCurrentClusterName(), s.metricsClient, s.logger) + s.ensureSystemDomainExists(pFactory, base.GetClusterMetadata().GetCurrentClusterName()) - if archiverEnabled || scannerEnabled { - s.ensureSystemDomainExists(pFactory, base.GetClusterMetadata().GetCurrentClusterName()) - } - if replicatorEnabled { - s.startReplicator(base, pFactory) - } - if archiverEnabled { - 
s.startArchiver(base, pFactory) - } - if scannerEnabled { - s.startScanner(base) - } - if batcherEnabled { - s.startBatcher(base) - } + s.startScanner(base) + if replicatorEnabled { + s.startReplicator(base, pFactory) + } + if archiverEnabled { + s.startArchiver(base, pFactory) + } + if batcherEnabled { + s.startBatcher(base) + } + if parentClosePolicyEnabled { + s.startParentClosePolicyProcessor(base) } s.logger.Info("service started", tag.ComponentWorker) @@ -186,6 +185,20 @@ func (s *Service) Stop() { s.params.Logger.Info("service stopped", tag.ComponentWorker) } +func (s *Service) startParentClosePolicyProcessor(base service.Service) { + params := &parentclosepolicy.BootstrapParams{ + ServiceClient: s.params.PublicClient, + MetricsClient: s.metricsClient, + Logger: s.logger, + TallyScope: s.params.MetricScope, + ClientBean: base.GetClientBean(), + } + processor := parentclosepolicy.New(params) + if err := processor.Start(); err != nil { + s.logger.Fatal("error starting parentclosepolicy processor", tag.Error(err)) + } +} + func (s *Service) startBatcher(base service.Service) { params := &batcher.BootstrapParams{ Config: *s.config.BatcherCfg, @@ -205,6 +218,7 @@ func (s *Service) startScanner(base service.Service) { params := &scanner.BootstrapParams{ Config: *s.config.ScannerCfg, SDKClient: s.params.PublicClient, + ClientBean: base.GetClientBean(), MetricsClient: s.metricsClient, Logger: s.logger, TallyScope: s.params.MetricScope, diff --git a/tools/cli/app_test.go b/tools/cli/app_test.go index ebcf777c2f5..7d2542818a0 100644 --- a/tools/cli/app_test.go +++ b/tools/cli/app_test.go @@ -635,8 +635,10 @@ func (s *cliAppSuite) TestGetWorkflowIdReusePolicy_Failed_Negative() { func (s *cliAppSuite) TestGetSearchAttributes() { resp := &shared.GetSearchAttributesResponse{} - s.clientFrontendClient.EXPECT().GetSearchAttributes(gomock.Any(), callOptions...).Return(resp, nil).Times(1) - err := s.app.Run([]string{"", "--do", domainName, "cluster", "get-search-attr"}) + 
s.clientFrontendClient.EXPECT().GetSearchAttributes(gomock.Any(), callOptions...).Return(resp, nil).Times(2) + err := s.app.Run([]string{"", "cluster", "get-search-attr"}) + s.Nil(err) + err = s.app.Run([]string{"", "--do", domainName, "cluster", "get-search-attr"}) s.Nil(err) } @@ -677,7 +679,76 @@ func (s *cliAppSuite) TestConvertStringToRealType() { res = convertStringToRealType("2019-01-01T01:01:01Z") s.Equal(time.Date(2019, 1, 1, 1, 1, 1, 0, time.UTC), res) + // array + res = convertStringToRealType(`["a", "b", "c"]`) + s.Equal([]interface{}{"a", "b", "c"}, res) + // string res = convertStringToRealType("test string") s.Equal("test string", res) } + +func (s *cliAppSuite) TestConvertArray() { + t1, _ := time.Parse(defaultDateTimeFormat, "2019-06-07T16:16:34-08:00") + t2, _ := time.Parse(defaultDateTimeFormat, "2019-06-07T17:16:34-08:00") + testCases := []struct { + name string + input string + expected interface{} + }{ + { + name: "string", + input: `["a", "b", "c"]`, + expected: []interface{}{"a", "b", "c"}, + }, + { + name: "int", + input: `[1, 2, 3]`, + expected: []interface{}{"1", "2", "3"}, + }, + { + name: "double", + input: `[1.1, 2.2, 3.3]`, + expected: []interface{}{"1.1", "2.2", "3.3"}, + }, + { + name: "bool", + input: `["true", "false"]`, + expected: []interface{}{"true", "false"}, + }, + { + name: "datetime", + input: `["2019-06-07T16:16:34-08:00", "2019-06-07T17:16:34-08:00"]`, + expected: []interface{}{t1, t2}, + }, + } + for _, testCase := range testCases { + res, err := parseArray(testCase.input) + s.Nil(err) + s.Equal(testCase.expected, res) + } + + testCases2 := []struct { + name string + input string + expected error + }{ + { + name: "not array", + input: "normal string", + }, + { + name: "empty string", + input: "", + }, + { + name: "not json array", + input: "[a, b, c]", + }, + } + for _, testCase := range testCases2 { + res, err := parseArray(testCase.input) + s.NotNil(err) + s.Nil(res) + } +} diff --git a/tools/cli/clusterCommands.go 
b/tools/cli/clusterCommands.go index cd262c9a47b..be24d900b65 100644 --- a/tools/cli/clusterCommands.go +++ b/tools/cli/clusterCommands.go @@ -30,7 +30,7 @@ import ( // GetSearchAttributes get valid search attributes func GetSearchAttributes(c *cli.Context) { - wfClient := getWorkflowClient(c) + wfClient := getWorkflowClientWithOptionalDomain(c) ctx, cancel := newContext(c) defer cancel() diff --git a/tools/cli/flags.go b/tools/cli/flags.go index 0139dd411d5..89cd4dfcdcf 100644 --- a/tools/cli/flags.go +++ b/tools/cli/flags.go @@ -314,6 +314,7 @@ func getFlagsForStart() []cli.Flag { cli.StringFlag{ Name: FlagSearchAttributesVal, Usage: "Optional search attributes value that can be be used in list query. If there are multiple keys, concatenate them and separate by |. " + + "If value is array, use json array like [\"a\",\"b\"], [1,2], [\"true\",\"false\"], [\"2019-06-07T17:16:34-08:00\",\"2019-06-07T18:16:34-08:00\"]. " + "Use 'cluster get-search-attr' cmd to list legal keys and value types", }, } diff --git a/tools/cli/util.go b/tools/cli/util.go index b1010d2c9a7..2d88952bcd9 100644 --- a/tools/cli/util.go +++ b/tools/cli/util.go @@ -24,6 +24,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -37,6 +38,7 @@ import ( "github.com/fatih/color" "github.com/uber/cadence/common" "github.com/urfave/cli" + "github.com/valyala/fastjson" s "go.uber.org/cadence/.gen/go/shared" "go.uber.org/cadence/client" ) @@ -526,6 +528,13 @@ func getWorkflowClient(c *cli.Context) client.Client { return client.NewClient(cFactory.ClientFrontendClient(c), domain, &client.Options{}) } +func getWorkflowClientWithOptionalDomain(c *cli.Context) client.Client { + if !c.GlobalIsSet(FlagDomain) { + c.GlobalSet(FlagDomain, "system-domain") + } + return getWorkflowClient(c) +} + func getRequiredOption(c *cli.Context, optionName string) string { value := c.String(optionName) if len(value) == 0 { @@ -706,6 +715,33 @@ func trimSpace(strs []string) []string { return 
result } +func parseArray(v string) (interface{}, error) { + if len(v) > 0 && v[0] == '[' && v[len(v)-1] == ']' { + parsedValues, err := fastjson.Parse(v) + if err != nil { + return nil, err + } + arr, err := parsedValues.Array() + if err != nil { + return nil, err + } + result := make([]interface{}, len(arr)) + for i, item := range arr { + s := item.String() + if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' { // remove addition quote from json + s = s[1 : len(s)-1] + if sTime, err := time.Parse(defaultDateTimeFormat, s); err == nil { + result[i] = sTime + continue + } + } + result[i] = s + } + return result, nil + } + return nil, errors.New("not array") +} + func convertStringToRealType(v string) interface{} { var genVal interface{} var err error @@ -718,6 +754,8 @@ func convertStringToRealType(v string) interface{} { } else if genVal, err = time.Parse(defaultDateTimeFormat, v); err == nil { + } else if genVal, err = parseArray(v); err == nil { + } else { genVal = v } diff --git a/tools/cli/workflowCommands.go b/tools/cli/workflowCommands.go index 4b1abfdde3a..84c1baaf99b 100644 --- a/tools/cli/workflowCommands.go +++ b/tools/cli/workflowCommands.go @@ -785,13 +785,14 @@ type pendingActivityInfo struct { State *shared.PendingActivityState ScheduledTimestamp *string `json:",omitempty"` // change from *int64 LastStartedTimestamp *string `json:",omitempty"` // change from *int64 - HeartbeatDetails *string `json:",omitempty"` // change from byte[] + HeartbeatDetails *string `json:",omitempty"` // change from []byte LastHeartbeatTimestamp *string `json:",omitempty"` // change from *int64 Attempt *int32 `json:",omitempty"` MaximumAttempts *int32 `json:",omitempty"` ExpirationTimestamp *string `json:",omitempty"` // change from *int64 LastFailureReason *string `json:",omitempty"` LastWorkerIdentity *string `json:",omitempty"` + LastFailureDetails *string `json:",omitempty"` // change from []byte } func convertDescribeWorkflowExecutionResponse(resp 
*shared.DescribeWorkflowExecutionResponse, @@ -830,6 +831,9 @@ func convertDescribeWorkflowExecutionResponse(resp *shared.DescribeWorkflowExecu if pa.HeartbeatDetails != nil { tmpAct.HeartbeatDetails = common.StringPtr(string(pa.HeartbeatDetails)) } + if pa.LastFailureDetails != nil { + tmpAct.LastFailureDetails = common.StringPtr(string(pa.LastFailureDetails)) + } pendingActs = append(pendingActs, tmpAct) }