From 602b810018e346ae797012be52418e09e0a74314 Mon Sep 17 00:00:00 2001 From: Liang Mei Date: Wed, 14 Aug 2019 17:11:06 -0700 Subject: [PATCH] Introduce pull based replicator that does not use kafka (#2377) - Created replication message fetcher and processor on standby history side. Fetcher is per node and processor is per shard. - Fetch requests are aggregated on box level and then send to source frontend. - Implemented fan out logic on source frontend to get replication messages from local history shards. - Introduced replication consumer config to control whether to use kafka based consumer or rpc based consumer. --- .gen/go/cadence/cadence.go | 707 ++++++++++- .../cadence/workflowserviceclient/client.go | 30 + .../cadence/workflowserviceserver/server.go | 38 +- .gen/go/cadence/workflowservicetest/client.go | 34 + .gen/go/history/history.go | 713 ++++++++++- .../go/history/historyserviceclient/client.go | 30 + .../go/history/historyserviceserver/server.go | 38 +- .gen/go/history/historyservicetest/client.go | 34 + .gen/go/replicator/replicator.go | 1119 ++++++++++++++++- .gen/go/shared/shared.go | 2 +- .gen/go/sqlblobs/sqlblobs.go | 52 +- Gopkg.lock | 28 +- client/clientfactory.go | 9 +- client/frontend/client.go | 19 +- client/frontend/metricClient.go | 18 + client/frontend/retryableClient.go | 16 + client/history/client.go | 59 + client/history/metricClient.go | 19 + client/history/retryableClient.go | 17 + cmd/server/server.go | 1 + common/cluster/metadata.go | 14 + common/cluster/metadataTestBase.go | 10 + common/daemon.go | 2 +- common/metrics/defs.go | 20 + common/mocks/ClusterMetadata.go | 14 + common/mocks/FrontendClient.go | 31 + common/mocks/HistoryClient.go | 29 +- .../cassandra/cassandraPersistence.go | 5 +- .../cassandra/cassandraPersistenceUtil.go | 5 + common/persistence/dataInterfaces.go | 1 + .../persistence-tests/executionManagerTest.go | 19 +- common/persistence/sql/sqlShardManager.go | 6 + common/service/config/config.go | 27 +- 
common/service/service.go | 2 +- config/development_active.yaml | 9 +- config/development_standby.yaml | 9 +- host/testcluster.go | 1 + .../xdc_integration_test_clusters.yaml | 14 + host/xdc/integration_failover_test.go | 9 +- idl/github.com/uber/cadence/cadence.thrift | 9 + idl/github.com/uber/cadence/history.thrift | 10 + idl/github.com/uber/cadence/replicator.thrift | 25 +- idl/github.com/uber/cadence/sqlblobs.thrift | 1 + schema/cassandra/cadence/schema.cql | 2 + .../v0.22/cluster_replication_level.cql | 1 + .../cadence/versioned/v0.22/manifest.json | 8 + service/frontend/dcRedirectionHandler.go | 11 + service/frontend/workflowHandler.go | 26 + service/frontend/workflowHandler_mock.go | 24 + service/history/MockHistoryEngine.go | 24 + service/history/conflictResolver_test.go | 12 +- service/history/handler.go | 120 +- service/history/historyEngine.go | 76 +- service/history/historyEngine2_test.go | 11 +- .../history/historyEngine3_eventsv2_test.go | 12 +- service/history/historyEngineInterfaces.go | 8 + service/history/historyEngine_test.go | 11 +- service/history/historyReplicator_test.go | 12 +- service/history/historyTestBase.go | 21 + service/history/replicationTaskFetcher.go | 215 ++++ service/history/replicationTaskProcessor.go | 317 +++++ service/history/replicatorQueueProcessor.go | 116 +- .../history/replicatorQueueProcessor_mock.go | 105 ++ service/history/shardContext.go | 30 + service/history/shardController_test.go | 24 +- service/history/timerQueueProcessor2_test.go | 11 +- service/history/timerQueueProcessor_test.go | 10 +- .../timerQueueStandbyProcessor_test.go | 11 +- .../transferQueueActiveProcessor_test.go | 12 +- .../transferQueueStandbyProcessor_test.go | 11 +- service/history/workflowResetor_test.go | 11 +- tools/cassandra/updateTask_test.go | 2 +- 72 files changed, 4293 insertions(+), 216 deletions(-) create mode 100644 schema/cassandra/cadence/versioned/v0.22/cluster_replication_level.cql create mode 100644 
schema/cassandra/cadence/versioned/v0.22/manifest.json create mode 100644 service/history/replicationTaskFetcher.go create mode 100644 service/history/replicationTaskProcessor.go create mode 100644 service/history/replicatorQueueProcessor_mock.go diff --git a/.gen/go/cadence/cadence.go b/.gen/go/cadence/cadence.go index 1c7e96d5fbf..35e8cd0d04a 100644 --- a/.gen/go/cadence/cadence.go +++ b/.gen/go/cadence/cadence.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// Code generated by thriftrw v1.20.0. DO NOT EDIT. +// Code generated by thriftrw v1.20.1. DO NOT EDIT. // @generated package cadence @@ -28,6 +28,7 @@ package cadence import ( errors "errors" fmt "fmt" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" multierr "go.uber.org/multierr" thriftreflect "go.uber.org/thriftrw/thriftreflect" @@ -41,14 +42,15 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "cadence", Package: "github.com/uber/cadence/.gen/go/cadence", FilePath: "cadence.thrift", - SHA1: "2d75e7300db85eedf440c3e1f8441926c3c6e7d6", + SHA1: "daa780b633210e284b4d5498758135b708560d55", Includes: []*thriftreflect.ThriftModule{ + replicator.ThriftModule, shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED 
\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. Domain\n * acts as a sandbox and provides isolation for all resources within the domain. 
All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to 
DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * Returns the history of specified workflow execution. 
It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. 
ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. 
It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: 
shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n \n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a 
visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: 
shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. 
Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError 
clientVersionNotSupportedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. Domain\n * acts as a sandbox and provides isolation for all resources within the domain. 
All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to 
DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * Returns the history of specified workflow execution. 
It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. 
ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. 
It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: 
shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n \n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a 
visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: 
shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. 
Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError 
clientVersionNotSupportedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n}\n" // WorkflowService_CountWorkflowExecutions_Args represents the arguments for the WorkflowService.CountWorkflowExecutions function. 
// @@ -3695,6 +3697,705 @@ func (v *WorkflowService_DescribeWorkflowExecution_Result) EnvelopeType() wire.E return wire.Reply } +// WorkflowService_GetReplicationMessages_Args represents the arguments for the WorkflowService.GetReplicationMessages function. +// +// The arguments for GetReplicationMessages are sent and received over the wire as this struct. +type WorkflowService_GetReplicationMessages_Args struct { + Request *replicator.GetReplicationMessagesRequest `json:"request,omitempty"` +} + +// ToWire translates a WorkflowService_GetReplicationMessages_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_GetReplicationMessages_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Request != nil { + w, err = v.Request.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _GetReplicationMessagesRequest_Read(w wire.Value) (*replicator.GetReplicationMessagesRequest, error) { + var v replicator.GetReplicationMessagesRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a WorkflowService_GetReplicationMessages_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowService_GetReplicationMessages_Args struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_GetReplicationMessages_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_GetReplicationMessages_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.Request, err = _GetReplicationMessagesRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_GetReplicationMessages_Args +// struct. +func (v *WorkflowService_GetReplicationMessages_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.Request != nil { + fields[i] = fmt.Sprintf("Request: %v", v.Request) + i++ + } + + return fmt.Sprintf("WorkflowService_GetReplicationMessages_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_GetReplicationMessages_Args match the +// provided WorkflowService_GetReplicationMessages_Args. +// +// This function performs a deep comparison. +func (v *WorkflowService_GetReplicationMessages_Args) Equals(rhs *WorkflowService_GetReplicationMessages_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_GetReplicationMessages_Args. 
+func (v *WorkflowService_GetReplicationMessages_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Request != nil { + err = multierr.Append(err, enc.AddObject("request", v.Request)) + } + return err +} + +// GetRequest returns the value of Request if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Args) GetRequest() (o *replicator.GetReplicationMessagesRequest) { + if v != nil && v.Request != nil { + return v.Request + } + + return +} + +// IsSetRequest returns true if Request is not nil. +func (v *WorkflowService_GetReplicationMessages_Args) IsSetRequest() bool { + return v != nil && v.Request != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "GetReplicationMessages" for this struct. +func (v *WorkflowService_GetReplicationMessages_Args) MethodName() string { + return "GetReplicationMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *WorkflowService_GetReplicationMessages_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// WorkflowService_GetReplicationMessages_Helper provides functions that aid in handling the +// parameters and return values of the WorkflowService.GetReplicationMessages +// function. +var WorkflowService_GetReplicationMessages_Helper = struct { + // Args accepts the parameters of GetReplicationMessages in-order and returns + // the arguments struct for the function. + Args func( + request *replicator.GetReplicationMessagesRequest, + ) *WorkflowService_GetReplicationMessages_Args + + // IsException returns true if the given error can be thrown + // by GetReplicationMessages. 
+ // + // An error can be thrown by GetReplicationMessages only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for GetReplicationMessages + // given its return value and error. + // + // This allows mapping values and errors returned by + // GetReplicationMessages into a serializable result struct. + // WrapResponse returns a non-nil error if the provided + // error cannot be thrown by GetReplicationMessages + // + // value, err := GetReplicationMessages(args) + // result, err := WorkflowService_GetReplicationMessages_Helper.WrapResponse(value, err) + // if err != nil { + // return fmt.Errorf("unexpected error from GetReplicationMessages: %v", err) + // } + // serialize(result) + WrapResponse func(*replicator.GetReplicationMessagesResponse, error) (*WorkflowService_GetReplicationMessages_Result, error) + + // UnwrapResponse takes the result struct for GetReplicationMessages + // and returns the value or error returned by it. + // + // The error is non-nil only if GetReplicationMessages threw an + // exception. 
+ // + // result := deserialize(bytes) + // value, err := WorkflowService_GetReplicationMessages_Helper.UnwrapResponse(result) + UnwrapResponse func(*WorkflowService_GetReplicationMessages_Result) (*replicator.GetReplicationMessagesResponse, error) +}{} + +func init() { + WorkflowService_GetReplicationMessages_Helper.Args = func( + request *replicator.GetReplicationMessagesRequest, + ) *WorkflowService_GetReplicationMessages_Args { + return &WorkflowService_GetReplicationMessages_Args{ + Request: request, + } + } + + WorkflowService_GetReplicationMessages_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.InternalServiceError: + return true + case *shared.LimitExceededError: + return true + case *shared.ServiceBusyError: + return true + case *shared.ClientVersionNotSupportedError: + return true + default: + return false + } + } + + WorkflowService_GetReplicationMessages_Helper.WrapResponse = func(success *replicator.GetReplicationMessagesResponse, err error) (*WorkflowService_GetReplicationMessages_Result, error) { + if err == nil { + return &WorkflowService_GetReplicationMessages_Result{Success: success}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_GetReplicationMessages_Result.BadRequestError") + } + return &WorkflowService_GetReplicationMessages_Result{BadRequestError: e}, nil + case *shared.InternalServiceError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_GetReplicationMessages_Result.InternalServiceError") + } + return &WorkflowService_GetReplicationMessages_Result{InternalServiceError: e}, nil + case *shared.LimitExceededError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for 
WorkflowService_GetReplicationMessages_Result.LimitExceededError") + } + return &WorkflowService_GetReplicationMessages_Result{LimitExceededError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_GetReplicationMessages_Result.ServiceBusyError") + } + return &WorkflowService_GetReplicationMessages_Result{ServiceBusyError: e}, nil + case *shared.ClientVersionNotSupportedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_GetReplicationMessages_Result.ClientVersionNotSupportedError") + } + return &WorkflowService_GetReplicationMessages_Result{ClientVersionNotSupportedError: e}, nil + } + + return nil, err + } + WorkflowService_GetReplicationMessages_Helper.UnwrapResponse = func(result *WorkflowService_GetReplicationMessages_Result) (success *replicator.GetReplicationMessagesResponse, err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.InternalServiceError != nil { + err = result.InternalServiceError + return + } + if result.LimitExceededError != nil { + err = result.LimitExceededError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.ClientVersionNotSupportedError != nil { + err = result.ClientVersionNotSupportedError + return + } + + if result.Success != nil { + success = result.Success + return + } + + err = errors.New("expected a non-void result") + return + } + +} + +// WorkflowService_GetReplicationMessages_Result represents the result of a WorkflowService.GetReplicationMessages function call. +// +// The result of a GetReplicationMessages execution is sent and received over the wire as this struct. +// +// Success is set only if the function did not throw an exception. 
+type WorkflowService_GetReplicationMessages_Result struct { + // Value returned by GetReplicationMessages after a successful execution. + Success *replicator.GetReplicationMessagesResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` +} + +// ToWire translates a WorkflowService_GetReplicationMessages_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_GetReplicationMessages_Result) ToWire() (wire.Value, error) { + var ( + fields [6]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Success != nil { + w, err = v.Success.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 0, Value: w} + i++ + } + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.InternalServiceError != nil { + w, err = v.InternalServiceError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.LimitExceededError != nil { + w, err = v.LimitExceededError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + if v.ClientVersionNotSupportedError != nil { + w, err = v.ClientVersionNotSupportedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + } + + if i != 1 { + return wire.Value{}, fmt.Errorf("WorkflowService_GetReplicationMessages_Result should have exactly one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _GetReplicationMessagesResponse_Read(w wire.Value) (*replicator.GetReplicationMessagesResponse, error) { + var v replicator.GetReplicationMessagesResponse + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a WorkflowService_GetReplicationMessages_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. 
+// +// An error is returned if we were unable to build a WorkflowService_GetReplicationMessages_Result struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_GetReplicationMessages_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_GetReplicationMessages_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 0: + if field.Value.Type() == wire.TStruct { + v.Success, err = _GetReplicationMessagesResponse_Read(field.Value) + if err != nil { + return err + } + + } + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.InternalServiceError, err = _InternalServiceError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.LimitExceededError, err = _LimitExceededError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 5: + if field.Value.Type() == wire.TStruct { + v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.Success != nil { + count++ + } + if v.BadRequestError != nil { + count++ + } + if v.InternalServiceError != nil { + count++ + } + if v.LimitExceededError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.ClientVersionNotSupportedError != nil { + count++ + } + if count != 1 { + return fmt.Errorf("WorkflowService_GetReplicationMessages_Result should have exactly one 
field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_GetReplicationMessages_Result +// struct. +func (v *WorkflowService_GetReplicationMessages_Result) String() string { + if v == nil { + return "" + } + + var fields [6]string + i := 0 + if v.Success != nil { + fields[i] = fmt.Sprintf("Success: %v", v.Success) + i++ + } + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.InternalServiceError != nil { + fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) + i++ + } + if v.LimitExceededError != nil { + fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.ClientVersionNotSupportedError != nil { + fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + i++ + } + + return fmt.Sprintf("WorkflowService_GetReplicationMessages_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_GetReplicationMessages_Result match the +// provided WorkflowService_GetReplicationMessages_Result. +// +// This function performs a deep comparison. 
+func (v *WorkflowService_GetReplicationMessages_Result) Equals(rhs *WorkflowService_GetReplicationMessages_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Success == nil && rhs.Success == nil) || (v.Success != nil && rhs.Success != nil && v.Success.Equals(rhs.Success))) { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { + return false + } + if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_GetReplicationMessages_Result. 
+func (v *WorkflowService_GetReplicationMessages_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Success != nil { + err = multierr.Append(err, enc.AddObject("success", v.Success)) + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.InternalServiceError != nil { + err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) + } + if v.LimitExceededError != nil { + err = multierr.Append(err, enc.AddObject("limitExceededError", v.LimitExceededError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.ClientVersionNotSupportedError != nil { + err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) + } + return err +} + +// GetSuccess returns the value of Success if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Result) GetSuccess() (o *replicator.GetReplicationMessagesResponse) { + if v != nil && v.Success != nil { + return v.Success + } + + return +} + +// IsSetSuccess returns true if Success is not nil. +func (v *WorkflowService_GetReplicationMessages_Result) IsSetSuccess() bool { + return v != nil && v.Success != nil +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. 
+func (v *WorkflowService_GetReplicationMessages_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetInternalServiceError returns the value of InternalServiceError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Result) GetInternalServiceError() (o *shared.InternalServiceError) { + if v != nil && v.InternalServiceError != nil { + return v.InternalServiceError + } + + return +} + +// IsSetInternalServiceError returns true if InternalServiceError is not nil. +func (v *WorkflowService_GetReplicationMessages_Result) IsSetInternalServiceError() bool { + return v != nil && v.InternalServiceError != nil +} + +// GetLimitExceededError returns the value of LimitExceededError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Result) GetLimitExceededError() (o *shared.LimitExceededError) { + if v != nil && v.LimitExceededError != nil { + return v.LimitExceededError + } + + return +} + +// IsSetLimitExceededError returns true if LimitExceededError is not nil. +func (v *WorkflowService_GetReplicationMessages_Result) IsSetLimitExceededError() bool { + return v != nil && v.LimitExceededError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_GetReplicationMessages_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *WorkflowService_GetReplicationMessages_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its +// zero value if it is unset. 
+func (v *WorkflowService_GetReplicationMessages_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { + if v != nil && v.ClientVersionNotSupportedError != nil { + return v.ClientVersionNotSupportedError + } + + return +} + +// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. +func (v *WorkflowService_GetReplicationMessages_Result) IsSetClientVersionNotSupportedError() bool { + return v != nil && v.ClientVersionNotSupportedError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "GetReplicationMessages" for this struct. +func (v *WorkflowService_GetReplicationMessages_Result) MethodName() string { + return "GetReplicationMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *WorkflowService_GetReplicationMessages_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + // WorkflowService_GetSearchAttributes_Args represents the arguments for the WorkflowService.GetSearchAttributes function. // // The arguments for GetSearchAttributes are sent and received over the wire as this struct. 
diff --git a/.gen/go/cadence/workflowserviceclient/client.go b/.gen/go/cadence/workflowserviceclient/client.go index 7a937942db9..de1781a0547 100644 --- a/.gen/go/cadence/workflowserviceclient/client.go +++ b/.gen/go/cadence/workflowserviceclient/client.go @@ -28,6 +28,7 @@ package workflowserviceclient import ( context "context" cadence "github.com/uber/cadence/.gen/go/cadence" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" wire "go.uber.org/thriftrw/wire" yarpc "go.uber.org/yarpc" @@ -68,6 +69,12 @@ type Interface interface { opts ...yarpc.CallOption, ) (*shared.DescribeWorkflowExecutionResponse, error) + GetReplicationMessages( + ctx context.Context, + Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, + ) (*replicator.GetReplicationMessagesResponse, error) + GetSearchAttributes( ctx context.Context, opts ...yarpc.CallOption, @@ -387,6 +394,29 @@ func (c client) DescribeWorkflowExecution( return } +func (c client) GetReplicationMessages( + ctx context.Context, + _Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.GetReplicationMessagesResponse, err error) { + + args := cadence.WorkflowService_GetReplicationMessages_Helper.Args(_Request) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) 
+ if err != nil { + return + } + + var result cadence.WorkflowService_GetReplicationMessages_Result + if err = result.FromWire(body); err != nil { + return + } + + success, err = cadence.WorkflowService_GetReplicationMessages_Helper.UnwrapResponse(&result) + return +} + func (c client) GetSearchAttributes( ctx context.Context, opts ...yarpc.CallOption, diff --git a/.gen/go/cadence/workflowserviceserver/server.go b/.gen/go/cadence/workflowserviceserver/server.go index a0ed62485e8..f4e8fe58f7f 100644 --- a/.gen/go/cadence/workflowserviceserver/server.go +++ b/.gen/go/cadence/workflowserviceserver/server.go @@ -28,6 +28,7 @@ package workflowserviceserver import ( context "context" cadence "github.com/uber/cadence/.gen/go/cadence" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" wire "go.uber.org/thriftrw/wire" transport "go.uber.org/yarpc/api/transport" @@ -61,6 +62,11 @@ type Interface interface { DescribeRequest *shared.DescribeWorkflowExecutionRequest, ) (*shared.DescribeWorkflowExecutionResponse, error) + GetReplicationMessages( + ctx context.Context, + Request *replicator.GetReplicationMessagesRequest, + ) (*replicator.GetReplicationMessagesResponse, error) + GetSearchAttributes( ctx context.Context, ) (*shared.GetSearchAttributesResponse, error) @@ -277,6 +283,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: cadence.ThriftModule, }, + thrift.Method{ + Name: "GetReplicationMessages", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.GetReplicationMessages), + }, + Signature: "GetReplicationMessages(Request *replicator.GetReplicationMessagesRequest) (*replicator.GetReplicationMessagesResponse)", + ThriftModule: cadence.ThriftModule, + }, + thrift.Method{ Name: "GetSearchAttributes", HandlerSpec: thrift.HandlerSpec{ @@ -609,7 +626,7 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { }, 
} - procedures := make([]transport.Procedure, 0, 35) + procedures := make([]transport.Procedure, 0, 36) procedures = append(procedures, thrift.BuildProcedures(service, opts...)...) return procedures } @@ -711,6 +728,25 @@ func (h handler) DescribeWorkflowExecution(ctx context.Context, body wire.Value) return response, err } +func (h handler) GetReplicationMessages(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args cadence.WorkflowService_GetReplicationMessages_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, err + } + + success, err := h.impl.GetReplicationMessages(ctx, args.Request) + + hadError := err != nil + result, err := cadence.WorkflowService_GetReplicationMessages_Helper.WrapResponse(success, err) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + } + return response, err +} + func (h handler) GetSearchAttributes(ctx context.Context, body wire.Value) (thrift.Response, error) { var args cadence.WorkflowService_GetSearchAttributes_Args if err := args.FromWire(body); err != nil { diff --git a/.gen/go/cadence/workflowservicetest/client.go b/.gen/go/cadence/workflowservicetest/client.go index b3e79047db4..c23529522a0 100644 --- a/.gen/go/cadence/workflowservicetest/client.go +++ b/.gen/go/cadence/workflowservicetest/client.go @@ -29,6 +29,7 @@ import ( context "context" gomock "github.com/golang/mock/gomock" workflowserviceclient "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" yarpc "go.uber.org/yarpc" ) @@ -227,6 +228,39 @@ func (mr *_MockClientRecorder) DescribeWorkflowExecution( return mr.mock.ctrl.RecordCall(mr.mock, "DescribeWorkflowExecution", args...) } +// GetReplicationMessages responds to a GetReplicationMessages call based on the mock expectations. This +// call will fail if the mock does not expect this call. 
Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().GetReplicationMessages(gomock.Any(), ...).Return(...) +// ... := client.GetReplicationMessages(...) +func (m *MockClient) GetReplicationMessages( + ctx context.Context, + _Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.GetReplicationMessagesResponse, err error) { + + args := []interface{}{ctx, _Request} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "GetReplicationMessages", args...) + success, _ = ret[i].(*replicator.GetReplicationMessagesResponse) + i++ + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) GetReplicationMessages( + ctx interface{}, + _Request interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _Request}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "GetReplicationMessages", args...) +} + // GetSearchAttributes responds to a GetSearchAttributes call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. diff --git a/.gen/go/history/history.go b/.gen/go/history/history.go index e73f675f6e5..661b4b2350c 100644 --- a/.gen/go/history/history.go +++ b/.gen/go/history/history.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// Code generated by thriftrw v1.20.0. DO NOT EDIT. +// Code generated by thriftrw v1.20.1. DO NOT EDIT. 
// @generated package history @@ -30,6 +30,7 @@ import ( base64 "encoding/base64" errors "errors" fmt "fmt" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" multierr "go.uber.org/multierr" thriftreflect "go.uber.org/thriftrw/thriftreflect" @@ -10032,14 +10033,15 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "history", Package: "github.com/uber/cadence/.gen/go/history", FilePath: "history.thrift", - SHA1: "fcf34660435969037311b9ef1f79d9d370db6a62", + SHA1: "9d80034f8b44cc70a7a43734229685e7a50c7508", Includes: []*thriftreflect.ThriftModule{ + replicator.ThriftModule, shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 
50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary branchToken\n 140: optional map replicationInfo\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of 
each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 
20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") 
startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Reset the sticky tasklist related information in 
mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. 
This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse 
SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current 
execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: 
ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 
3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type 
= \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary branchToken\n 140: optional map replicationInfo\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional 
string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 
10: optional string domainUUID\n 20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. 
It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") 
startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Reset the sticky tasklist related information in 
mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. 
This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse 
SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current 
execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: 
ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 
3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n}\n" // HistoryService_DescribeHistoryHost_Args represents the arguments for the HistoryService.DescribeHistoryHost function. // @@ -12935,6 +12937,711 @@ func (v *HistoryService_GetMutableState_Result) EnvelopeType() wire.EnvelopeType return wire.Reply } +// HistoryService_GetReplicationMessages_Args represents the arguments for the HistoryService.GetReplicationMessages function. +// +// The arguments for GetReplicationMessages are sent and received over the wire as this struct. +type HistoryService_GetReplicationMessages_Args struct { + Request *replicator.GetReplicationMessagesRequest `json:"request,omitempty"` +} + +// ToWire translates a HistoryService_GetReplicationMessages_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_GetReplicationMessages_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Request != nil { + w, err = v.Request.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _GetReplicationMessagesRequest_Read(w wire.Value) (*replicator.GetReplicationMessagesRequest, error) { + var v replicator.GetReplicationMessagesRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_GetReplicationMessages_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_GetReplicationMessages_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_GetReplicationMessages_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_GetReplicationMessages_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.Request, err = _GetReplicationMessagesRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a HistoryService_GetReplicationMessages_Args +// struct. 
+func (v *HistoryService_GetReplicationMessages_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.Request != nil { + fields[i] = fmt.Sprintf("Request: %v", v.Request) + i++ + } + + return fmt.Sprintf("HistoryService_GetReplicationMessages_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_GetReplicationMessages_Args match the +// provided HistoryService_GetReplicationMessages_Args. +// +// This function performs a deep comparison. +func (v *HistoryService_GetReplicationMessages_Args) Equals(rhs *HistoryService_GetReplicationMessages_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_GetReplicationMessages_Args. +func (v *HistoryService_GetReplicationMessages_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Request != nil { + err = multierr.Append(err, enc.AddObject("request", v.Request)) + } + return err +} + +// GetRequest returns the value of Request if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Args) GetRequest() (o *replicator.GetReplicationMessagesRequest) { + if v != nil && v.Request != nil { + return v.Request + } + + return +} + +// IsSetRequest returns true if Request is not nil. +func (v *HistoryService_GetReplicationMessages_Args) IsSetRequest() bool { + return v != nil && v.Request != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "GetReplicationMessages" for this struct. 
+func (v *HistoryService_GetReplicationMessages_Args) MethodName() string { + return "GetReplicationMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *HistoryService_GetReplicationMessages_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// HistoryService_GetReplicationMessages_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.GetReplicationMessages +// function. +var HistoryService_GetReplicationMessages_Helper = struct { + // Args accepts the parameters of GetReplicationMessages in-order and returns + // the arguments struct for the function. + Args func( + request *replicator.GetReplicationMessagesRequest, + ) *HistoryService_GetReplicationMessages_Args + + // IsException returns true if the given error can be thrown + // by GetReplicationMessages. + // + // An error can be thrown by GetReplicationMessages only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for GetReplicationMessages + // given its return value and error. + // + // This allows mapping values and errors returned by + // GetReplicationMessages into a serializable result struct. + // WrapResponse returns a non-nil error if the provided + // error cannot be thrown by GetReplicationMessages + // + // value, err := GetReplicationMessages(args) + // result, err := HistoryService_GetReplicationMessages_Helper.WrapResponse(value, err) + // if err != nil { + // return fmt.Errorf("unexpected error from GetReplicationMessages: %v", err) + // } + // serialize(result) + WrapResponse func(*replicator.GetReplicationMessagesResponse, error) (*HistoryService_GetReplicationMessages_Result, error) + + // UnwrapResponse takes the result struct for GetReplicationMessages + // and returns the value or error returned by it. 
+ // + // The error is non-nil only if GetReplicationMessages threw an + // exception. + // + // result := deserialize(bytes) + // value, err := HistoryService_GetReplicationMessages_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_GetReplicationMessages_Result) (*replicator.GetReplicationMessagesResponse, error) +}{} + +func init() { + HistoryService_GetReplicationMessages_Helper.Args = func( + request *replicator.GetReplicationMessagesRequest, + ) *HistoryService_GetReplicationMessages_Args { + return &HistoryService_GetReplicationMessages_Args{ + Request: request, + } + } + + HistoryService_GetReplicationMessages_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.InternalServiceError: + return true + case *shared.LimitExceededError: + return true + case *shared.ServiceBusyError: + return true + case *shared.ClientVersionNotSupportedError: + return true + default: + return false + } + } + + HistoryService_GetReplicationMessages_Helper.WrapResponse = func(success *replicator.GetReplicationMessagesResponse, err error) (*HistoryService_GetReplicationMessages_Result, error) { + if err == nil { + return &HistoryService_GetReplicationMessages_Result{Success: success}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_GetReplicationMessages_Result.BadRequestError") + } + return &HistoryService_GetReplicationMessages_Result{BadRequestError: e}, nil + case *shared.InternalServiceError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_GetReplicationMessages_Result.InternalServiceError") + } + return &HistoryService_GetReplicationMessages_Result{InternalServiceError: e}, nil + case *shared.LimitExceededError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil 
error type with nil value for HistoryService_GetReplicationMessages_Result.LimitExceededError") + } + return &HistoryService_GetReplicationMessages_Result{LimitExceededError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_GetReplicationMessages_Result.ServiceBusyError") + } + return &HistoryService_GetReplicationMessages_Result{ServiceBusyError: e}, nil + case *shared.ClientVersionNotSupportedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_GetReplicationMessages_Result.ClientVersionNotSupportedError") + } + return &HistoryService_GetReplicationMessages_Result{ClientVersionNotSupportedError: e}, nil + } + + return nil, err + } + HistoryService_GetReplicationMessages_Helper.UnwrapResponse = func(result *HistoryService_GetReplicationMessages_Result) (success *replicator.GetReplicationMessagesResponse, err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.InternalServiceError != nil { + err = result.InternalServiceError + return + } + if result.LimitExceededError != nil { + err = result.LimitExceededError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.ClientVersionNotSupportedError != nil { + err = result.ClientVersionNotSupportedError + return + } + + if result.Success != nil { + success = result.Success + return + } + + err = errors.New("expected a non-void result") + return + } + +} + +// HistoryService_GetReplicationMessages_Result represents the result of a HistoryService.GetReplicationMessages function call. +// +// The result of a GetReplicationMessages execution is sent and received over the wire as this struct. +// +// Success is set only if the function did not throw an exception. 
+type HistoryService_GetReplicationMessages_Result struct { + // Value returned by GetReplicationMessages after a successful execution. + Success *replicator.GetReplicationMessagesResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` +} + +// ToWire translates a HistoryService_GetReplicationMessages_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_GetReplicationMessages_Result) ToWire() (wire.Value, error) { + var ( + fields [6]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Success != nil { + w, err = v.Success.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 0, Value: w} + i++ + } + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.InternalServiceError != nil { + w, err = v.InternalServiceError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.LimitExceededError != nil { + w, err = v.LimitExceededError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + if v.ClientVersionNotSupportedError != nil { + w, err = v.ClientVersionNotSupportedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + } + + if i != 1 { + return wire.Value{}, fmt.Errorf("HistoryService_GetReplicationMessages_Result should have exactly one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _GetReplicationMessagesResponse_Read(w wire.Value) (*replicator.GetReplicationMessagesResponse, error) { + var v replicator.GetReplicationMessagesResponse + err := v.FromWire(w) + return &v, err +} + +func _ClientVersionNotSupportedError_Read(w wire.Value) (*shared.ClientVersionNotSupportedError, error) { + var v shared.ClientVersionNotSupportedError + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a 
HistoryService_GetReplicationMessages_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_GetReplicationMessages_Result struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_GetReplicationMessages_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_GetReplicationMessages_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 0: + if field.Value.Type() == wire.TStruct { + v.Success, err = _GetReplicationMessagesResponse_Read(field.Value) + if err != nil { + return err + } + + } + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.InternalServiceError, err = _InternalServiceError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.LimitExceededError, err = _LimitExceededError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 5: + if field.Value.Type() == wire.TStruct { + v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.Success != nil { + count++ + } + if v.BadRequestError != nil { + count++ + } + if v.InternalServiceError != nil { + count++ + } + if v.LimitExceededError != nil { + count++ + } + if v.ServiceBusyError != nil { 
+ count++ + } + if v.ClientVersionNotSupportedError != nil { + count++ + } + if count != 1 { + return fmt.Errorf("HistoryService_GetReplicationMessages_Result should have exactly one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a HistoryService_GetReplicationMessages_Result +// struct. +func (v *HistoryService_GetReplicationMessages_Result) String() string { + if v == nil { + return "" + } + + var fields [6]string + i := 0 + if v.Success != nil { + fields[i] = fmt.Sprintf("Success: %v", v.Success) + i++ + } + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.InternalServiceError != nil { + fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) + i++ + } + if v.LimitExceededError != nil { + fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.ClientVersionNotSupportedError != nil { + fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + i++ + } + + return fmt.Sprintf("HistoryService_GetReplicationMessages_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_GetReplicationMessages_Result match the +// provided HistoryService_GetReplicationMessages_Result. +// +// This function performs a deep comparison. 
+func (v *HistoryService_GetReplicationMessages_Result) Equals(rhs *HistoryService_GetReplicationMessages_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Success == nil && rhs.Success == nil) || (v.Success != nil && rhs.Success != nil && v.Success.Equals(rhs.Success))) { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { + return false + } + if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_GetReplicationMessages_Result. 
+func (v *HistoryService_GetReplicationMessages_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Success != nil { + err = multierr.Append(err, enc.AddObject("success", v.Success)) + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.InternalServiceError != nil { + err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) + } + if v.LimitExceededError != nil { + err = multierr.Append(err, enc.AddObject("limitExceededError", v.LimitExceededError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.ClientVersionNotSupportedError != nil { + err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) + } + return err +} + +// GetSuccess returns the value of Success if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Result) GetSuccess() (o *replicator.GetReplicationMessagesResponse) { + if v != nil && v.Success != nil { + return v.Success + } + + return +} + +// IsSetSuccess returns true if Success is not nil. +func (v *HistoryService_GetReplicationMessages_Result) IsSetSuccess() bool { + return v != nil && v.Success != nil +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. 
+func (v *HistoryService_GetReplicationMessages_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetInternalServiceError returns the value of InternalServiceError if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Result) GetInternalServiceError() (o *shared.InternalServiceError) { + if v != nil && v.InternalServiceError != nil { + return v.InternalServiceError + } + + return +} + +// IsSetInternalServiceError returns true if InternalServiceError is not nil. +func (v *HistoryService_GetReplicationMessages_Result) IsSetInternalServiceError() bool { + return v != nil && v.InternalServiceError != nil +} + +// GetLimitExceededError returns the value of LimitExceededError if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Result) GetLimitExceededError() (o *shared.LimitExceededError) { + if v != nil && v.LimitExceededError != nil { + return v.LimitExceededError + } + + return +} + +// IsSetLimitExceededError returns true if LimitExceededError is not nil. +func (v *HistoryService_GetReplicationMessages_Result) IsSetLimitExceededError() bool { + return v != nil && v.LimitExceededError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *HistoryService_GetReplicationMessages_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *HistoryService_GetReplicationMessages_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its +// zero value if it is unset. 
+func (v *HistoryService_GetReplicationMessages_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { + if v != nil && v.ClientVersionNotSupportedError != nil { + return v.ClientVersionNotSupportedError + } + + return +} + +// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. +func (v *HistoryService_GetReplicationMessages_Result) IsSetClientVersionNotSupportedError() bool { + return v != nil && v.ClientVersionNotSupportedError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "GetReplicationMessages" for this struct. +func (v *HistoryService_GetReplicationMessages_Result) MethodName() string { + return "GetReplicationMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *HistoryService_GetReplicationMessages_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + // HistoryService_RecordActivityTaskHeartbeat_Args represents the arguments for the HistoryService.RecordActivityTaskHeartbeat function. // // The arguments for RecordActivityTaskHeartbeat are sent and received over the wire as this struct. 
diff --git a/.gen/go/history/historyserviceclient/client.go b/.gen/go/history/historyserviceclient/client.go index e756c831406..0d5eff047cd 100644 --- a/.gen/go/history/historyserviceclient/client.go +++ b/.gen/go/history/historyserviceclient/client.go @@ -28,6 +28,7 @@ package historyserviceclient import ( context "context" history "github.com/uber/cadence/.gen/go/history" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" wire "go.uber.org/thriftrw/wire" yarpc "go.uber.org/yarpc" @@ -62,6 +63,12 @@ type Interface interface { opts ...yarpc.CallOption, ) (*history.GetMutableStateResponse, error) + GetReplicationMessages( + ctx context.Context, + Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, + ) (*replicator.GetReplicationMessagesResponse, error) + RecordActivityTaskHeartbeat( ctx context.Context, HeartbeatRequest *history.RecordActivityTaskHeartbeatRequest, @@ -311,6 +318,29 @@ func (c client) GetMutableState( return } +func (c client) GetReplicationMessages( + ctx context.Context, + _Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.GetReplicationMessagesResponse, err error) { + + args := history.HistoryService_GetReplicationMessages_Helper.Args(_Request) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) 
+ if err != nil { + return + } + + var result history.HistoryService_GetReplicationMessages_Result + if err = result.FromWire(body); err != nil { + return + } + + success, err = history.HistoryService_GetReplicationMessages_Helper.UnwrapResponse(&result) + return +} + func (c client) RecordActivityTaskHeartbeat( ctx context.Context, _HeartbeatRequest *history.RecordActivityTaskHeartbeatRequest, diff --git a/.gen/go/history/historyserviceserver/server.go b/.gen/go/history/historyserviceserver/server.go index 389b8462c71..811b4e89243 100644 --- a/.gen/go/history/historyserviceserver/server.go +++ b/.gen/go/history/historyserviceserver/server.go @@ -28,6 +28,7 @@ package historyserviceserver import ( context "context" history "github.com/uber/cadence/.gen/go/history" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" wire "go.uber.org/thriftrw/wire" transport "go.uber.org/yarpc/api/transport" @@ -56,6 +57,11 @@ type Interface interface { GetRequest *history.GetMutableStateRequest, ) (*history.GetMutableStateResponse, error) + GetReplicationMessages( + ctx context.Context, + Request *replicator.GetReplicationMessagesRequest, + ) (*replicator.GetReplicationMessagesResponse, error) + RecordActivityTaskHeartbeat( ctx context.Context, HeartbeatRequest *history.RecordActivityTaskHeartbeatRequest, @@ -222,6 +228,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: history.ThriftModule, }, + thrift.Method{ + Name: "GetReplicationMessages", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.GetReplicationMessages), + }, + Signature: "GetReplicationMessages(Request *replicator.GetReplicationMessagesRequest) (*replicator.GetReplicationMessagesResponse)", + ThriftModule: history.ThriftModule, + }, + thrift.Method{ Name: "RecordActivityTaskHeartbeat", HandlerSpec: thrift.HandlerSpec{ @@ -466,7 +483,7 @@ func New(impl Interface, opts 
...thrift.RegisterOption) []transport.Procedure { }, } - procedures := make([]transport.Procedure, 0, 26) + procedures := make([]transport.Procedure, 0, 27) procedures = append(procedures, thrift.BuildProcedures(service, opts...)...) return procedures } @@ -549,6 +566,25 @@ func (h handler) GetMutableState(ctx context.Context, body wire.Value) (thrift.R return response, err } +func (h handler) GetReplicationMessages(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args history.HistoryService_GetReplicationMessages_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, err + } + + success, err := h.impl.GetReplicationMessages(ctx, args.Request) + + hadError := err != nil + result, err := history.HistoryService_GetReplicationMessages_Helper.WrapResponse(success, err) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + } + return response, err +} + func (h handler) RecordActivityTaskHeartbeat(ctx context.Context, body wire.Value) (thrift.Response, error) { var args history.HistoryService_RecordActivityTaskHeartbeat_Args if err := args.FromWire(body); err != nil { diff --git a/.gen/go/history/historyservicetest/client.go b/.gen/go/history/historyservicetest/client.go index 6ef802d802b..08e6aef5d23 100644 --- a/.gen/go/history/historyservicetest/client.go +++ b/.gen/go/history/historyservicetest/client.go @@ -30,6 +30,7 @@ import ( gomock "github.com/golang/mock/gomock" history "github.com/uber/cadence/.gen/go/history" historyserviceclient "github.com/uber/cadence/.gen/go/history/historyserviceclient" + replicator "github.com/uber/cadence/.gen/go/replicator" shared "github.com/uber/cadence/.gen/go/shared" yarpc "go.uber.org/yarpc" ) @@ -197,6 +198,39 @@ func (mr *_MockClientRecorder) GetMutableState( return mr.mock.ctrl.RecordCall(mr.mock, "GetMutableState", args...) 
} +// GetReplicationMessages responds to a GetReplicationMessages call based on the mock expectations. This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().GetReplicationMessages(gomock.Any(), ...).Return(...) +// ... := client.GetReplicationMessages(...) +func (m *MockClient) GetReplicationMessages( + ctx context.Context, + _Request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.GetReplicationMessagesResponse, err error) { + + args := []interface{}{ctx, _Request} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "GetReplicationMessages", args...) + success, _ = ret[i].(*replicator.GetReplicationMessagesResponse) + i++ + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) GetReplicationMessages( + ctx interface{}, + _Request interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _Request}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "GetReplicationMessages", args...) +} + // RecordActivityTaskHeartbeat responds to a RecordActivityTaskHeartbeat call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. diff --git a/.gen/go/replicator/replicator.go b/.gen/go/replicator/replicator.go index 5debe810dbe..e5c325576c5 100644 --- a/.gen/go/replicator/replicator.go +++ b/.gen/go/replicator/replicator.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// Code generated by thriftrw v1.20.0. DO NOT EDIT. +// Code generated by thriftrw v1.20.1. DO NOT EDIT. 
// @generated package replicator @@ -30,7 +30,6 @@ import ( base64 "encoding/base64" json "encoding/json" fmt "fmt" - history "github.com/uber/cadence/.gen/go/history" shared "github.com/uber/cadence/.gen/go/shared" multierr "go.uber.org/multierr" thriftreflect "go.uber.org/thriftrw/thriftreflect" @@ -662,6 +661,470 @@ func (v *DomainTaskAttributes) IsSetFailoverVersion() bool { return v != nil && v.FailoverVersion != nil } +type GetReplicationMessagesRequest struct { + Tokens []*ReplicationToken `json:"tokens,omitempty"` +} + +type _List_ReplicationToken_ValueList []*ReplicationToken + +func (v _List_ReplicationToken_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } + } + return nil +} + +func (v _List_ReplicationToken_ValueList) Size() int { + return len(v) +} + +func (_List_ReplicationToken_ValueList) ValueType() wire.Type { + return wire.TStruct +} + +func (_List_ReplicationToken_ValueList) Close() {} + +// ToWire translates a GetReplicationMessagesRequest struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *GetReplicationMessagesRequest) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Tokens != nil { + w, err = wire.NewValueList(_List_ReplicationToken_ValueList(v.Tokens)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ReplicationToken_Read(w wire.Value) (*ReplicationToken, error) { + var v ReplicationToken + err := v.FromWire(w) + return &v, err +} + +func _List_ReplicationToken_Read(l wire.ValueList) ([]*ReplicationToken, error) { + if l.ValueType() != wire.TStruct { + return nil, nil + } + + o := make([]*ReplicationToken, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _ReplicationToken_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err +} + +// FromWire deserializes a GetReplicationMessagesRequest struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a GetReplicationMessagesRequest struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v GetReplicationMessagesRequest +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *GetReplicationMessagesRequest) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TList { + v.Tokens, err = _List_ReplicationToken_Read(field.Value.GetList()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a GetReplicationMessagesRequest +// struct. +func (v *GetReplicationMessagesRequest) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.Tokens != nil { + fields[i] = fmt.Sprintf("Tokens: %v", v.Tokens) + i++ + } + + return fmt.Sprintf("GetReplicationMessagesRequest{%v}", strings.Join(fields[:i], ", ")) +} + +func _List_ReplicationToken_Equals(lhs, rhs []*ReplicationToken) bool { + if len(lhs) != len(rhs) { + return false + } + + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } + + return true +} + +// Equals returns true if all the fields of this GetReplicationMessagesRequest match the +// provided GetReplicationMessagesRequest. +// +// This function performs a deep comparison. +func (v *GetReplicationMessagesRequest) Equals(rhs *GetReplicationMessagesRequest) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Tokens == nil && rhs.Tokens == nil) || (v.Tokens != nil && rhs.Tokens != nil && _List_ReplicationToken_Equals(v.Tokens, rhs.Tokens))) { + return false + } + + return true +} + +type _List_ReplicationToken_Zapper []*ReplicationToken + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_ReplicationToken_Zapper. 
+func (l _List_ReplicationToken_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of GetReplicationMessagesRequest. +func (v *GetReplicationMessagesRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Tokens != nil { + err = multierr.Append(err, enc.AddArray("tokens", (_List_ReplicationToken_Zapper)(v.Tokens))) + } + return err +} + +// GetTokens returns the value of Tokens if it is set or its +// zero value if it is unset. +func (v *GetReplicationMessagesRequest) GetTokens() (o []*ReplicationToken) { + if v != nil && v.Tokens != nil { + return v.Tokens + } + + return +} + +// IsSetTokens returns true if Tokens is not nil. +func (v *GetReplicationMessagesRequest) IsSetTokens() bool { + return v != nil && v.Tokens != nil +} + +type GetReplicationMessagesResponse struct { + MessagesByShard map[int32]*ReplicationMessages `json:"messagesByShard,omitempty"` +} + +type _Map_I32_ReplicationMessages_MapItemList map[int32]*ReplicationMessages + +func (m _Map_I32_ReplicationMessages_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid [%v]: value is nil", k) + } + kw, err := wire.NewValueI32(k), error(nil) + if err != nil { + return err + } + + vw, err := v.ToWire() + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_I32_ReplicationMessages_MapItemList) Size() int { + return len(m) +} + +func (_Map_I32_ReplicationMessages_MapItemList) KeyType() wire.Type { + return wire.TI32 +} + +func (_Map_I32_ReplicationMessages_MapItemList) ValueType() wire.Type { + return wire.TStruct +} + +func (_Map_I32_ReplicationMessages_MapItemList) Close() {} + +// ToWire translates a 
GetReplicationMessagesResponse struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *GetReplicationMessagesResponse) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.MessagesByShard != nil { + w, err = wire.NewValueMap(_Map_I32_ReplicationMessages_MapItemList(v.MessagesByShard)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ReplicationMessages_Read(w wire.Value) (*ReplicationMessages, error) { + var v ReplicationMessages + err := v.FromWire(w) + return &v, err +} + +func _Map_I32_ReplicationMessages_Read(m wire.MapItemList) (map[int32]*ReplicationMessages, error) { + if m.KeyType() != wire.TI32 { + return nil, nil + } + + if m.ValueType() != wire.TStruct { + return nil, nil + } + + o := make(map[int32]*ReplicationMessages, m.Size()) + err := m.ForEach(func(x wire.MapItem) error { + k, err := x.Key.GetI32(), error(nil) + if err != nil { + return err + } + + v, err := _ReplicationMessages_Read(x.Value) + if err != nil { + return err + } + + o[k] = v + return nil + }) + m.Close() + return o, err +} + +// FromWire deserializes a GetReplicationMessagesResponse struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a GetReplicationMessagesResponse struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v GetReplicationMessagesResponse +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *GetReplicationMessagesResponse) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TMap { + v.MessagesByShard, err = _Map_I32_ReplicationMessages_Read(field.Value.GetMap()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a GetReplicationMessagesResponse +// struct. +func (v *GetReplicationMessagesResponse) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.MessagesByShard != nil { + fields[i] = fmt.Sprintf("MessagesByShard: %v", v.MessagesByShard) + i++ + } + + return fmt.Sprintf("GetReplicationMessagesResponse{%v}", strings.Join(fields[:i], ", ")) +} + +func _Map_I32_ReplicationMessages_Equals(lhs, rhs map[int32]*ReplicationMessages) bool { + if len(lhs) != len(rhs) { + return false + } + + for lk, lv := range lhs { + rv, ok := rhs[lk] + if !ok { + return false + } + if !lv.Equals(rv) { + return false + } + } + return true +} + +// Equals returns true if all the fields of this GetReplicationMessagesResponse match the +// provided GetReplicationMessagesResponse. +// +// This function performs a deep comparison. 
+func (v *GetReplicationMessagesResponse) Equals(rhs *GetReplicationMessagesResponse) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.MessagesByShard == nil && rhs.MessagesByShard == nil) || (v.MessagesByShard != nil && rhs.MessagesByShard != nil && _Map_I32_ReplicationMessages_Equals(v.MessagesByShard, rhs.MessagesByShard))) { + return false + } + + return true +} + +type _Map_I32_ReplicationMessages_Item_Zapper struct { + Key int32 + Value *ReplicationMessages +} + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _Map_I32_ReplicationMessages_Item_Zapper. +func (v _Map_I32_ReplicationMessages_Item_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + enc.AddInt32("key", v.Key) + err = multierr.Append(err, enc.AddObject("value", v.Value)) + return err +} + +type _Map_I32_ReplicationMessages_Zapper map[int32]*ReplicationMessages + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _Map_I32_ReplicationMessages_Zapper. +func (m _Map_I32_ReplicationMessages_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for k, v := range m { + err = multierr.Append(err, enc.AppendObject(_Map_I32_ReplicationMessages_Item_Zapper{Key: k, Value: v})) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of GetReplicationMessagesResponse. +func (v *GetReplicationMessagesResponse) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.MessagesByShard != nil { + err = multierr.Append(err, enc.AddArray("messagesByShard", (_Map_I32_ReplicationMessages_Zapper)(v.MessagesByShard))) + } + return err +} + +// GetMessagesByShard returns the value of MessagesByShard if it is set or its +// zero value if it is unset. 
+func (v *GetReplicationMessagesResponse) GetMessagesByShard() (o map[int32]*ReplicationMessages) { + if v != nil && v.MessagesByShard != nil { + return v.MessagesByShard + } + + return +} + +// IsSetMessagesByShard returns true if MessagesByShard is not nil. +func (v *GetReplicationMessagesResponse) IsSetMessagesByShard() bool { + return v != nil && v.MessagesByShard != nil +} + type HistoryMetadataTaskAttributes struct { TargetClusters []string `json:"targetClusters,omitempty"` DomainId *string `json:"domainId,omitempty"` @@ -1804,76 +2267,380 @@ func (v *HistoryTaskAttributes) GetHistory() (o *shared.History) { return v.History } - return + return +} + +// IsSetHistory returns true if History is not nil. +func (v *HistoryTaskAttributes) IsSetHistory() bool { + return v != nil && v.History != nil +} + +// GetNewRunHistory returns the value of NewRunHistory if it is set or its +// zero value if it is unset. +func (v *HistoryTaskAttributes) GetNewRunHistory() (o *shared.History) { + if v != nil && v.NewRunHistory != nil { + return v.NewRunHistory + } + + return +} + +// IsSetNewRunHistory returns true if NewRunHistory is not nil. +func (v *HistoryTaskAttributes) IsSetNewRunHistory() bool { + return v != nil && v.NewRunHistory != nil +} + +// GetEventStoreVersion returns the value of EventStoreVersion if it is set or its +// zero value if it is unset. +func (v *HistoryTaskAttributes) GetEventStoreVersion() (o int32) { + if v != nil && v.EventStoreVersion != nil { + return *v.EventStoreVersion + } + + return +} + +// IsSetEventStoreVersion returns true if EventStoreVersion is not nil. +func (v *HistoryTaskAttributes) IsSetEventStoreVersion() bool { + return v != nil && v.EventStoreVersion != nil +} + +// GetNewRunEventStoreVersion returns the value of NewRunEventStoreVersion if it is set or its +// zero value if it is unset. 
+func (v *HistoryTaskAttributes) GetNewRunEventStoreVersion() (o int32) { + if v != nil && v.NewRunEventStoreVersion != nil { + return *v.NewRunEventStoreVersion + } + + return +} + +// IsSetNewRunEventStoreVersion returns true if NewRunEventStoreVersion is not nil. +func (v *HistoryTaskAttributes) IsSetNewRunEventStoreVersion() bool { + return v != nil && v.NewRunEventStoreVersion != nil +} + +// GetResetWorkflow returns the value of ResetWorkflow if it is set or its +// zero value if it is unset. +func (v *HistoryTaskAttributes) GetResetWorkflow() (o bool) { + if v != nil && v.ResetWorkflow != nil { + return *v.ResetWorkflow + } + + return +} + +// IsSetResetWorkflow returns true if ResetWorkflow is not nil. +func (v *HistoryTaskAttributes) IsSetResetWorkflow() bool { + return v != nil && v.ResetWorkflow != nil +} + +type ReplicationMessages struct { + ReplicationTasks []*ReplicationTask `json:"replicationTasks,omitempty"` + LastRetrivedMessageId *int64 `json:"lastRetrivedMessageId,omitempty"` + HasMore *bool `json:"hasMore,omitempty"` +} + +type _List_ReplicationTask_ValueList []*ReplicationTask + +func (v _List_ReplicationTask_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } + } + return nil +} + +func (v _List_ReplicationTask_ValueList) Size() int { + return len(v) +} + +func (_List_ReplicationTask_ValueList) ValueType() wire.Type { + return wire.TStruct +} + +func (_List_ReplicationTask_ValueList) Close() {} + +// ToWire translates a ReplicationMessages struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ReplicationMessages) ToWire() (wire.Value, error) { + var ( + fields [3]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ReplicationTasks != nil { + w, err = wire.NewValueList(_List_ReplicationTask_ValueList(v.ReplicationTasks)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.LastRetrivedMessageId != nil { + w, err = wire.NewValueI64(*(v.LastRetrivedMessageId)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.HasMore != nil { + w, err = wire.NewValueBool(*(v.HasMore)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ReplicationTask_Read(w wire.Value) (*ReplicationTask, error) { + var v ReplicationTask + err := v.FromWire(w) + return &v, err +} + +func _List_ReplicationTask_Read(l wire.ValueList) ([]*ReplicationTask, error) { + if l.ValueType() != wire.TStruct { + return nil, nil + } + + o := make([]*ReplicationTask, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _ReplicationTask_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err +} + +// FromWire deserializes a ReplicationMessages struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ReplicationMessages struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ReplicationMessages +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ReplicationMessages) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TList { + v.ReplicationTasks, err = _List_ReplicationTask_Read(field.Value.GetList()) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.LastRetrivedMessageId = &x + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TBool { + var x bool + x, err = field.Value.GetBool(), error(nil) + v.HasMore = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ReplicationMessages +// struct. +func (v *ReplicationMessages) String() string { + if v == nil { + return "" + } + + var fields [3]string + i := 0 + if v.ReplicationTasks != nil { + fields[i] = fmt.Sprintf("ReplicationTasks: %v", v.ReplicationTasks) + i++ + } + if v.LastRetrivedMessageId != nil { + fields[i] = fmt.Sprintf("LastRetrivedMessageId: %v", *(v.LastRetrivedMessageId)) + i++ + } + if v.HasMore != nil { + fields[i] = fmt.Sprintf("HasMore: %v", *(v.HasMore)) + i++ + } + + return fmt.Sprintf("ReplicationMessages{%v}", strings.Join(fields[:i], ", ")) +} + +func _List_ReplicationTask_Equals(lhs, rhs []*ReplicationTask) bool { + if len(lhs) != len(rhs) { + return false + } + + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } + + return true } -// IsSetHistory returns true if History is not nil. 
-func (v *HistoryTaskAttributes) IsSetHistory() bool { - return v != nil && v.History != nil +// Equals returns true if all the fields of this ReplicationMessages match the +// provided ReplicationMessages. +// +// This function performs a deep comparison. +func (v *ReplicationMessages) Equals(rhs *ReplicationMessages) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.ReplicationTasks == nil && rhs.ReplicationTasks == nil) || (v.ReplicationTasks != nil && rhs.ReplicationTasks != nil && _List_ReplicationTask_Equals(v.ReplicationTasks, rhs.ReplicationTasks))) { + return false + } + if !_I64_EqualsPtr(v.LastRetrivedMessageId, rhs.LastRetrivedMessageId) { + return false + } + if !_Bool_EqualsPtr(v.HasMore, rhs.HasMore) { + return false + } + + return true } -// GetNewRunHistory returns the value of NewRunHistory if it is set or its -// zero value if it is unset. -func (v *HistoryTaskAttributes) GetNewRunHistory() (o *shared.History) { - if v != nil && v.NewRunHistory != nil { - return v.NewRunHistory - } +type _List_ReplicationTask_Zapper []*ReplicationTask - return +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_ReplicationTask_Zapper. +func (l _List_ReplicationTask_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) + } + return err } -// IsSetNewRunHistory returns true if NewRunHistory is not nil. -func (v *HistoryTaskAttributes) IsSetNewRunHistory() bool { - return v != nil && v.NewRunHistory != nil +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ReplicationMessages. 
+func (v *ReplicationMessages) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ReplicationTasks != nil { + err = multierr.Append(err, enc.AddArray("replicationTasks", (_List_ReplicationTask_Zapper)(v.ReplicationTasks))) + } + if v.LastRetrivedMessageId != nil { + enc.AddInt64("lastRetrivedMessageId", *v.LastRetrivedMessageId) + } + if v.HasMore != nil { + enc.AddBool("hasMore", *v.HasMore) + } + return err } -// GetEventStoreVersion returns the value of EventStoreVersion if it is set or its +// GetReplicationTasks returns the value of ReplicationTasks if it is set or its // zero value if it is unset. -func (v *HistoryTaskAttributes) GetEventStoreVersion() (o int32) { - if v != nil && v.EventStoreVersion != nil { - return *v.EventStoreVersion +func (v *ReplicationMessages) GetReplicationTasks() (o []*ReplicationTask) { + if v != nil && v.ReplicationTasks != nil { + return v.ReplicationTasks } return } -// IsSetEventStoreVersion returns true if EventStoreVersion is not nil. -func (v *HistoryTaskAttributes) IsSetEventStoreVersion() bool { - return v != nil && v.EventStoreVersion != nil +// IsSetReplicationTasks returns true if ReplicationTasks is not nil. +func (v *ReplicationMessages) IsSetReplicationTasks() bool { + return v != nil && v.ReplicationTasks != nil } -// GetNewRunEventStoreVersion returns the value of NewRunEventStoreVersion if it is set or its +// GetLastRetrivedMessageId returns the value of LastRetrivedMessageId if it is set or its // zero value if it is unset. -func (v *HistoryTaskAttributes) GetNewRunEventStoreVersion() (o int32) { - if v != nil && v.NewRunEventStoreVersion != nil { - return *v.NewRunEventStoreVersion +func (v *ReplicationMessages) GetLastRetrivedMessageId() (o int64) { + if v != nil && v.LastRetrivedMessageId != nil { + return *v.LastRetrivedMessageId } return } -// IsSetNewRunEventStoreVersion returns true if NewRunEventStoreVersion is not nil. 
-func (v *HistoryTaskAttributes) IsSetNewRunEventStoreVersion() bool { - return v != nil && v.NewRunEventStoreVersion != nil +// IsSetLastRetrivedMessageId returns true if LastRetrivedMessageId is not nil. +func (v *ReplicationMessages) IsSetLastRetrivedMessageId() bool { + return v != nil && v.LastRetrivedMessageId != nil } -// GetResetWorkflow returns the value of ResetWorkflow if it is set or its +// GetHasMore returns the value of HasMore if it is set or its // zero value if it is unset. -func (v *HistoryTaskAttributes) GetResetWorkflow() (o bool) { - if v != nil && v.ResetWorkflow != nil { - return *v.ResetWorkflow +func (v *ReplicationMessages) GetHasMore() (o bool) { + if v != nil && v.HasMore != nil { + return *v.HasMore } return } -// IsSetResetWorkflow returns true if ResetWorkflow is not nil. -func (v *HistoryTaskAttributes) IsSetResetWorkflow() bool { - return v != nil && v.ResetWorkflow != nil +// IsSetHasMore returns true if HasMore is not nil. +func (v *ReplicationMessages) IsSetHasMore() bool { + return v != nil && v.HasMore != nil } type ReplicationTask struct { TaskType *ReplicationTaskType `json:"taskType,omitempty"` + SourceTaskId *int64 `json:"sourceTaskId,omitempty"` DomainTaskAttributes *DomainTaskAttributes `json:"domainTaskAttributes,omitempty"` HistoryTaskAttributes *HistoryTaskAttributes `json:"historyTaskAttributes,omitempty"` SyncShardStatusTaskAttributes *SyncShardStatusTaskAttributes `json:"syncShardStatusTaskAttributes,omitempty"` @@ -1898,7 +2665,7 @@ type ReplicationTask struct { // } func (v *ReplicationTask) ToWire() (wire.Value, error) { var ( - fields [6]wire.Field + fields [7]wire.Field i int = 0 w wire.Value err error @@ -1912,6 +2679,14 @@ func (v *ReplicationTask) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 10, Value: w} i++ } + if v.SourceTaskId != nil { + w, err = wire.NewValueI64(*(v.SourceTaskId)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 11, Value: w} + i++ + } if 
v.DomainTaskAttributes != nil { w, err = v.DomainTaskAttributes.ToWire() if err != nil { @@ -2023,6 +2798,16 @@ func (v *ReplicationTask) FromWire(w wire.Value) error { return err } + } + case 11: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.SourceTaskId = &x + if err != nil { + return err + } + } case 20: if field.Value.Type() == wire.TStruct { @@ -2077,12 +2862,16 @@ func (v *ReplicationTask) String() string { return "" } - var fields [6]string + var fields [7]string i := 0 if v.TaskType != nil { fields[i] = fmt.Sprintf("TaskType: %v", *(v.TaskType)) i++ } + if v.SourceTaskId != nil { + fields[i] = fmt.Sprintf("SourceTaskId: %v", *(v.SourceTaskId)) + i++ + } if v.DomainTaskAttributes != nil { fields[i] = fmt.Sprintf("DomainTaskAttributes: %v", v.DomainTaskAttributes) i++ @@ -2130,6 +2919,9 @@ func (v *ReplicationTask) Equals(rhs *ReplicationTask) bool { if !_ReplicationTaskType_EqualsPtr(v.TaskType, rhs.TaskType) { return false } + if !_I64_EqualsPtr(v.SourceTaskId, rhs.SourceTaskId) { + return false + } if !((v.DomainTaskAttributes == nil && rhs.DomainTaskAttributes == nil) || (v.DomainTaskAttributes != nil && rhs.DomainTaskAttributes != nil && v.DomainTaskAttributes.Equals(rhs.DomainTaskAttributes))) { return false } @@ -2158,6 +2950,9 @@ func (v *ReplicationTask) MarshalLogObject(enc zapcore.ObjectEncoder) (err error if v.TaskType != nil { err = multierr.Append(err, enc.AddObject("taskType", *v.TaskType)) } + if v.SourceTaskId != nil { + enc.AddInt64("sourceTaskId", *v.SourceTaskId) + } if v.DomainTaskAttributes != nil { err = multierr.Append(err, enc.AddObject("domainTaskAttributes", v.DomainTaskAttributes)) } @@ -2191,6 +2986,21 @@ func (v *ReplicationTask) IsSetTaskType() bool { return v != nil && v.TaskType != nil } +// GetSourceTaskId returns the value of SourceTaskId if it is set or its +// zero value if it is unset. 
+func (v *ReplicationTask) GetSourceTaskId() (o int64) { + if v != nil && v.SourceTaskId != nil { + return *v.SourceTaskId + } + + return +} + +// IsSetSourceTaskId returns true if SourceTaskId is not nil. +func (v *ReplicationTask) IsSetSourceTaskId() bool { + return v != nil && v.SourceTaskId != nil +} + // GetDomainTaskAttributes returns the value of DomainTaskAttributes if it is set or its // zero value if it is unset. func (v *ReplicationTask) GetDomainTaskAttributes() (o *DomainTaskAttributes) { @@ -2476,6 +3286,232 @@ func (v *ReplicationTaskType) UnmarshalJSON(text []byte) error { } } +type ReplicationToken struct { + ShardID *int32 `json:"shardID,omitempty"` + LastRetrivedMessageId *int64 `json:"lastRetrivedMessageId,omitempty"` + LastProcessedMessageId *int64 `json:"lastProcessedMessageId,omitempty"` +} + +// ToWire translates a ReplicationToken struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ReplicationToken) ToWire() (wire.Value, error) { + var ( + fields [3]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ShardID != nil { + w, err = wire.NewValueI32(*(v.ShardID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.LastRetrivedMessageId != nil { + w, err = wire.NewValueI64(*(v.LastRetrivedMessageId)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.LastProcessedMessageId != nil { + w, err = wire.NewValueI64(*(v.LastProcessedMessageId)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a ReplicationToken struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ReplicationToken struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ReplicationToken +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ReplicationToken) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TI32 { + var x int32 + x, err = field.Value.GetI32(), error(nil) + v.ShardID = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.LastRetrivedMessageId = &x + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.LastProcessedMessageId = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ReplicationToken +// struct. +func (v *ReplicationToken) String() string { + if v == nil { + return "" + } + + var fields [3]string + i := 0 + if v.ShardID != nil { + fields[i] = fmt.Sprintf("ShardID: %v", *(v.ShardID)) + i++ + } + if v.LastRetrivedMessageId != nil { + fields[i] = fmt.Sprintf("LastRetrivedMessageId: %v", *(v.LastRetrivedMessageId)) + i++ + } + if v.LastProcessedMessageId != nil { + fields[i] = fmt.Sprintf("LastProcessedMessageId: %v", *(v.LastProcessedMessageId)) + i++ + } + + return fmt.Sprintf("ReplicationToken{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this ReplicationToken match the +// provided ReplicationToken. +// +// This function performs a deep comparison. 
+func (v *ReplicationToken) Equals(rhs *ReplicationToken) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_I32_EqualsPtr(v.ShardID, rhs.ShardID) { + return false + } + if !_I64_EqualsPtr(v.LastRetrivedMessageId, rhs.LastRetrivedMessageId) { + return false + } + if !_I64_EqualsPtr(v.LastProcessedMessageId, rhs.LastProcessedMessageId) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ReplicationToken. +func (v *ReplicationToken) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ShardID != nil { + enc.AddInt32("shardID", *v.ShardID) + } + if v.LastRetrivedMessageId != nil { + enc.AddInt64("lastRetrivedMessageId", *v.LastRetrivedMessageId) + } + if v.LastProcessedMessageId != nil { + enc.AddInt64("lastProcessedMessageId", *v.LastProcessedMessageId) + } + return err +} + +// GetShardID returns the value of ShardID if it is set or its +// zero value if it is unset. +func (v *ReplicationToken) GetShardID() (o int32) { + if v != nil && v.ShardID != nil { + return *v.ShardID + } + + return +} + +// IsSetShardID returns true if ShardID is not nil. +func (v *ReplicationToken) IsSetShardID() bool { + return v != nil && v.ShardID != nil +} + +// GetLastRetrivedMessageId returns the value of LastRetrivedMessageId if it is set or its +// zero value if it is unset. +func (v *ReplicationToken) GetLastRetrivedMessageId() (o int64) { + if v != nil && v.LastRetrivedMessageId != nil { + return *v.LastRetrivedMessageId + } + + return +} + +// IsSetLastRetrivedMessageId returns true if LastRetrivedMessageId is not nil. +func (v *ReplicationToken) IsSetLastRetrivedMessageId() bool { + return v != nil && v.LastRetrivedMessageId != nil +} + +// GetLastProcessedMessageId returns the value of LastProcessedMessageId if it is set or its +// zero value if it is unset. 
+func (v *ReplicationToken) GetLastProcessedMessageId() (o int64) { + if v != nil && v.LastProcessedMessageId != nil { + return *v.LastProcessedMessageId + } + + return +} + +// IsSetLastProcessedMessageId returns true if LastProcessedMessageId is not nil. +func (v *ReplicationToken) IsSetLastProcessedMessageId() bool { + return v != nil && v.LastProcessedMessageId != nil +} + type SyncActicvityTaskAttributes struct { DomainId *string `json:"domainId,omitempty"` WorkflowId *string `json:"workflowId,omitempty"` @@ -3371,12 +4407,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "replicator", Package: "github.com/uber/cadence/.gen/go/replicator", FilePath: "replicator.thrift", - SHA1: "56bb8db140cdc19594a11ab287979e60aacfff25", + SHA1: "9fa6a718103b310e320bf7c3f1828f68c0e579bb", Includes: []*thriftreflect.ThriftModule{ - history.ThriftModule, shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\ninclude \"history.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActicvityTaskAttributes {\n 10: optional string domainId\n 20: optional 
string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActicvityTaskAttributes syncActicvityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes\n}\n\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActicvityTaskAttributes {\n 10: optional string domainId\n 20: optional string workflowId\n 30: 
optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 11: optional i64 (js.type = \"Long\") sourceTaskId\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActicvityTaskAttributes syncActicvityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes\n}\n\nstruct ReplicationToken {\n 10: optional i32 shardID\n // lastRetrivedMessageId is where the next fetch should begin with\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrivedMessageId if passive side supports prefetching messages.\n 30: optional i64 (js.type = \"Long\") lastProcessedMessageId\n}\n\nstruct ReplicationMessages {\n 10: optional list replicationTasks\n // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. 
for completed workflows).\n 20: optional i64 (js.type = \"Long\") lastRetrivedMessageId\n 30: optional bool hasMore // Hint for flow control\n}\n\nstruct GetReplicationMessagesRequest {\n 10: optional list tokens\n}\n\nstruct GetReplicationMessagesResponse {\n 10: optional map messagesByShard\n}" diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index ca0e227d0b5..203f011c340 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// Code generated by thriftrw v1.20.0. DO NOT EDIT. +// Code generated by thriftrw v1.20.1. DO NOT EDIT. // @generated package shared diff --git a/.gen/go/sqlblobs/sqlblobs.go b/.gen/go/sqlblobs/sqlblobs.go index 6d49db15bfe..4537ef9c3f5 100644 --- a/.gen/go/sqlblobs/sqlblobs.go +++ b/.gen/go/sqlblobs/sqlblobs.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -// Code generated by thriftrw v1.20.0. DO NOT EDIT. +// Code generated by thriftrw v1.20.1. DO NOT EDIT. 
// @generated package sqlblobs @@ -4771,6 +4771,7 @@ type ShardInfo struct { ClusterTransferAckLevel map[string]int64 `json:"clusterTransferAckLevel,omitempty"` ClusterTimerAckLevel map[string]int64 `json:"clusterTimerAckLevel,omitempty"` Owner *string `json:"owner,omitempty"` + ClusterReplicationLevel map[string]int64 `json:"clusterReplicationLevel,omitempty"` } type _Map_String_I64_MapItemList map[string]int64 @@ -4825,7 +4826,7 @@ func (_Map_String_I64_MapItemList) Close() {} // } func (v *ShardInfo) ToWire() (wire.Value, error) { var ( - fields [9]wire.Field + fields [10]wire.Field i int = 0 w wire.Value err error @@ -4903,6 +4904,14 @@ func (v *ShardInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 38, Value: w} i++ } + if v.ClusterReplicationLevel != nil { + w, err = wire.NewValueMap(_Map_String_I64_MapItemList(v.ClusterReplicationLevel)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 40, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -5042,6 +5051,14 @@ func (v *ShardInfo) FromWire(w wire.Value) error { return err } + } + case 40: + if field.Value.Type() == wire.TMap { + v.ClusterReplicationLevel, err = _Map_String_I64_Read(field.Value.GetMap()) + if err != nil { + return err + } + } } } @@ -5056,7 +5073,7 @@ func (v *ShardInfo) String() string { return "" } - var fields [9]string + var fields [10]string i := 0 if v.StolenSinceRenew != nil { fields[i] = fmt.Sprintf("StolenSinceRenew: %v", *(v.StolenSinceRenew)) @@ -5094,6 +5111,10 @@ func (v *ShardInfo) String() string { fields[i] = fmt.Sprintf("Owner: %v", *(v.Owner)) i++ } + if v.ClusterReplicationLevel != nil { + fields[i] = fmt.Sprintf("ClusterReplicationLevel: %v", v.ClusterReplicationLevel) + i++ + } return fmt.Sprintf("ShardInfo{%v}", strings.Join(fields[:i], ", ")) } @@ -5152,6 +5173,9 @@ func (v *ShardInfo) Equals(rhs *ShardInfo) bool { if !_String_EqualsPtr(v.Owner, rhs.Owner) { return false } + if 
!((v.ClusterReplicationLevel == nil && rhs.ClusterReplicationLevel == nil) || (v.ClusterReplicationLevel != nil && rhs.ClusterReplicationLevel != nil && _Map_String_I64_Equals(v.ClusterReplicationLevel, rhs.ClusterReplicationLevel))) { + return false + } return true } @@ -5200,6 +5224,9 @@ func (v *ShardInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v.Owner != nil { enc.AddString("owner", *v.Owner) } + if v.ClusterReplicationLevel != nil { + err = multierr.Append(err, enc.AddObject("clusterReplicationLevel", (_Map_String_I64_Zapper)(v.ClusterReplicationLevel))) + } return err } @@ -5338,6 +5365,21 @@ func (v *ShardInfo) IsSetOwner() bool { return v != nil && v.Owner != nil } +// GetClusterReplicationLevel returns the value of ClusterReplicationLevel if it is set or its +// zero value if it is unset. +func (v *ShardInfo) GetClusterReplicationLevel() (o map[string]int64) { + if v != nil && v.ClusterReplicationLevel != nil { + return v.ClusterReplicationLevel + } + + return +} + +// IsSetClusterReplicationLevel returns true if ClusterReplicationLevel is not nil. 
+func (v *ShardInfo) IsSetClusterReplicationLevel() bool { + return v != nil && v.ClusterReplicationLevel != nil +} + type SignalInfo struct { Version *int64 `json:"version,omitempty"` RequestID *string `json:"requestID,omitempty"` @@ -10281,11 +10323,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "sqlblobs", Package: "github.com/uber/cadence/.gen/go/sqlblobs", FilePath: "sqlblobs.thrift", - SHA1: "ee1f89af3bb338ca350de62f7e446f91cb50bdf3", + SHA1: "9217ed60f157ab4ccd8cc77d94f21eb0726d3aa3", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. 
Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") lastEventID\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 40: optional i64 (js.type = \"Long\") currentVersion\n 44: optional i64 (js.type = \"Long\") lastWriteEventID\n 46: optional map lastReplicationInfo\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 (js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string 
createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional 
string taskList\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 30: optional string domainName\n 32: optional string workflowTypeName\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: optional i64 (js.type = \"Long\") createdTimeNanos\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = 
\"Long\") lastUpdatedNanos\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 32: optional map lastReplicationInfo\n 34: optional binary newRunBranchToken\n 36: optional bool resetWorkflow\n}" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above 
copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n 40: optional map clusterReplicationLevel\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 
visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") lastEventID\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 40: optional i64 (js.type = \"Long\") currentVersion\n 44: optional i64 (js.type = \"Long\") lastWriteEventID\n 46: optional map lastReplicationInfo\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 
(js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 
36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional string taskList\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 30: optional string domainName\n 32: optional string workflowTypeName\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: 
optional i64 (js.type = \"Long\") createdTimeNanos\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") lastUpdatedNanos\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 32: optional map lastReplicationInfo\n 34: optional binary newRunBranchToken\n 36: optional bool resetWorkflow\n}" diff --git a/Gopkg.lock b/Gopkg.lock index ceeb2b1219e..ecd1fd45060 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -456,7 +456,7 @@ revision = "cc27d85e17cec9768d2ac401ea5d619a9628f16d" [[projects]] - digest = "1:aadf45babee88876f78829bac7d56936e8bd7495f349b61255c9709113c68dd3" + digest = 
"1:01eb93f225544756fe9ce5d275679758578c85ebd035f20bc81f445198ac3f59" name = "github.com/olivere/elastic" packages = [ ".", @@ -464,8 +464,8 @@ "uritemplates", ] pruneopts = "" - revision = "2ce6dee3af14e17dc059746a1de4f983169030e9" - version = "v6.2.21" + revision = "8ebe6a0fc23d9d53fbd8890da8ae7ee7cea78dbe" + version = "v6.2.22" [[projects]] digest = "1:1fc4897d3cc482d070651563c16a51489296cd9150e6d53fb7ff4d59a24334bc" @@ -645,7 +645,7 @@ version = "v1.0.0" [[projects]] - digest = "1:689cd2039f6fd296539759dc53d8c4dc6fe1260465347fff9819da01cfe86ee4" + digest = "1:aaef0733d4f8a38791c7b465fd4ab1a3f9afb6bdbd6bff2712deb803a0bcab1e" name = "github.com/uber-go/tally" packages = [ ".", @@ -658,8 +658,8 @@ "thirdparty/github.com/apache/thrift/lib/go/thrift", ] pruneopts = "" - revision = "f266f90e9c4d5894364039a324a05d061f2f34e2" - version = "v3.3.11" + revision = "3332297784e46cd346ab6d9894fd4ea027dc9368" + version = "v3.3.12" [[projects]] digest = "1:86555acbb9507153d3cd0d032e07279ba89e38aadc8200cfca3b5d14c98b4daf" @@ -705,11 +705,11 @@ [[projects]] branch = "master" - digest = "1:0c0682804b7a414090df12c40e1f28d8acc4fe8b626eb67daf060804a5da081d" + digest = "1:13a24c4398a86df35169b438a14a81eef3b63037ea57fd609aa291461d1a003b" name = "github.com/urfave/cli" packages = ["."] pruneopts = "" - revision = "693af58b4d51b8fcc7f9d89576da170765980581" + revision = "93392d12e8cd60a1c7c62dc4bf4bab7a8e001eec" [[projects]] digest = "1:ded68a73f6dcedbf1b90909e00a28417854bfa8fb3a4ca6f12f11960e151214e" @@ -746,7 +746,7 @@ [[projects]] branch = "master" - digest = "1:abd37a913ab309f064de3824b3515dfc55145787c0b4c0867ea304057e4ea9d9" + digest = "1:7a04aa4482371ededbe3f26410b80c8ab50415050947fa43140c165f7face690" name = "go.uber.org/cadence" packages = [ ".", @@ -768,7 +768,7 @@ "workflow", ] pruneopts = "" - revision = "092191788628a99de99f0ea82ef64f607c787694" + revision = "1e3023da270d1dc51fbcf8cd47404f3ce4018834" [[projects]] digest = 
"1:22c7effcb4da0eacb2bb1940ee173fac010e9ef3c691f5de4b524d538bd980f5" @@ -935,14 +935,14 @@ [[projects]] branch = "master" - digest = "1:022f4bd6739cc585724b670eb38b3fbd3efe60d45cd6b279c990e434f8a8f6ea" + digest = "1:0b5c2207c72f2d13995040f176feb6e3f453d6b01af2b9d57df76b05ded2e926" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "" - revision = "cbf593c0f2f39034e9104bbf77e2ec7c48c98fc5" + revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d" [[projects]] branch = "master" @@ -954,7 +954,7 @@ [[projects]] branch = "master" - digest = "1:8f1daaef904fd6f9850bbde919a36e67fecf42586c56cfef4655f29f8225ea8b" + digest = "1:761bb6ea1becb2ac789e5931c1c413cc063b14bc6f6dabe78cc0df9fa819bae6" name = "golang.org/x/tools" packages = [ "cmd/stringer", @@ -974,7 +974,7 @@ "internal/semver", ] pruneopts = "" - revision = "e9bb7d36c0606a2c422dffe30db7d628ebeb9302" + revision = "88ddfcebc769cb7884c38d144ee893cfb4519053" [[projects]] digest = "1:47f391ee443f578f01168347818cb234ed819521e49e4d2c8dd2fb80d48ee41a" diff --git a/client/clientfactory.go b/client/clientfactory.go index 2bc40fc8028..9335b1eef0f 100644 --- a/client/clientfactory.go +++ b/client/clientfactory.go @@ -23,8 +23,6 @@ package client import ( "time" - "go.uber.org/yarpc" - "github.com/uber/cadence/.gen/go/admin/adminserviceclient" "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" "github.com/uber/cadence/.gen/go/history/historyserviceclient" @@ -34,9 +32,11 @@ import ( "github.com/uber/cadence/client/history" "github.com/uber/cadence/client/matching" "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/membership" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/service/dynamicconfig" + "go.uber.org/yarpc" ) const ( @@ -74,6 +74,7 @@ type ( metricsClient metrics.Client dynConfig *dynamicconfig.Collection numberOfHistoryShards int + logger log.Logger } ) @@ -84,6 +85,7 @@ func NewRPCClientFactory( metricsClient 
metrics.Client, dc *dynamicconfig.Collection, numberOfHistoryShards int, + logger log.Logger, ) Factory { return &rpcClientFactory{ rpcFactory: rpcFactory, @@ -91,6 +93,7 @@ func NewRPCClientFactory( metricsClient: metricsClient, dynConfig: dc, numberOfHistoryShards: numberOfHistoryShards, + logger: logger, } } @@ -125,7 +128,7 @@ func (cf *rpcClientFactory) NewHistoryClientWithTimeout(timeout time.Duration) ( return historyserviceclient.New(dispatcher.ClientConfig(common.HistoryServiceName)), nil } - client := history.NewClient(cf.numberOfHistoryShards, timeout, common.NewClientCache(keyResolver, clientProvider)) + client := history.NewClient(cf.numberOfHistoryShards, timeout, common.NewClientCache(keyResolver, clientProvider), cf.logger) if cf.metricsClient != nil { client = history.NewMetricClient(client, cf.metricsClient) } diff --git a/client/frontend/client.go b/client/frontend/client.go index 9c38ac6ec1e..135514351c5 100644 --- a/client/frontend/client.go +++ b/client/frontend/client.go @@ -24,12 +24,12 @@ import ( "context" "time" - "go.uber.org/yarpc" - "github.com/pborman/uuid" "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" + "go.uber.org/yarpc" ) var _ Client = (*clientImpl)(nil) @@ -643,3 +643,18 @@ func (c *clientImpl) getRandomClient() (workflowserviceclient.Interface, error) return client.(workflowserviceclient.Interface), nil } + +func (c *clientImpl) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + opts = common.AggregateYarpcOptions(ctx, opts...) + client, err := c.getRandomClient() + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.GetReplicationMessages(ctx, request, opts...) 
+} diff --git a/client/frontend/metricClient.go b/client/frontend/metricClient.go index 8111f78e294..e2811dff5f0 100644 --- a/client/frontend/metricClient.go +++ b/client/frontend/metricClient.go @@ -23,6 +23,7 @@ package frontend import ( "context" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common/metrics" "go.uber.org/yarpc" @@ -671,3 +672,20 @@ func (c *metricClient) UpdateDomain( } return resp, err } + +func (c *metricClient) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + c.metricsClient.IncCounter(metrics.FrontendClientGetReplicationTasksScope, metrics.CadenceClientRequests) + + sw := c.metricsClient.StartTimer(metrics.FrontendClientGetReplicationTasksScope, metrics.CadenceClientLatency) + resp, err := c.client.GetReplicationMessages(ctx, request, opts...) + sw.Stop() + + if err != nil { + c.metricsClient.IncCounter(metrics.FrontendClientGetReplicationTasksScope, metrics.CadenceClientFailures) + } + return resp, err +} diff --git a/client/frontend/retryableClient.go b/client/frontend/retryableClient.go index 9a43bc9b7e5..08df6451d00 100644 --- a/client/frontend/retryableClient.go +++ b/client/frontend/retryableClient.go @@ -23,6 +23,7 @@ package frontend import ( "context" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common/backoff" "go.uber.org/yarpc" @@ -551,3 +552,18 @@ func (c *retryableClient) UpdateDomain( err := backoff.Retry(op, c.policy, c.isRetryable) return resp, err } + +func (c *retryableClient) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + var resp *replicator.GetReplicationMessagesResponse + op := func() error { + var err error + 
resp, err = c.client.GetReplicationMessages(ctx, request, opts...) + return err + } + err := backoff.Retry(op, c.policy, c.isRetryable) + return resp, err +} diff --git a/client/history/client.go b/client/history/client.go index 9ca84f81cda..475a9147eec 100644 --- a/client/history/client.go +++ b/client/history/client.go @@ -22,12 +22,16 @@ package history import ( "context" + "sync" "time" h "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/history/historyserviceclient" + "github.com/uber/cadence/.gen/go/replicator" workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" "go.uber.org/yarpc" ) @@ -43,6 +47,7 @@ type clientImpl struct { tokenSerializer common.TaskTokenSerializer timeout time.Duration clients common.ClientCache + logger log.Logger } // NewClient creates a new history service TChannel client @@ -50,12 +55,14 @@ func NewClient( numberOfShards int, timeout time.Duration, clients common.ClientCache, + logger log.Logger, ) Client { return &clientImpl{ numberOfShards: numberOfShards, tokenSerializer: common.NewJSONTaskTokenSerializer(), timeout: timeout, clients: clients, + logger: logger, } } @@ -638,6 +645,58 @@ func (c *clientImpl) SyncActivity( return err } +func (c *clientImpl) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + requestsByClient := make(map[historyserviceclient.Interface]*replicator.GetReplicationMessagesRequest) + + for _, token := range request.Tokens { + client, err := c.getClientForShardID(int(token.GetShardID())) + if err != nil { + return nil, err + } + + if _, ok := requestsByClient[client]; !ok { + requestsByClient[client] = &replicator.GetReplicationMessagesRequest{} + } + + req := requestsByClient[client] + req.Tokens = append(req.Tokens, token) + } + + var wg 
sync.WaitGroup + wg.Add(len(requestsByClient)) + respChan := make(chan *replicator.GetReplicationMessagesResponse, len(requestsByClient)) + for client, req := range requestsByClient { + go func(client historyserviceclient.Interface, request *replicator.GetReplicationMessagesRequest) { + defer wg.Done() + + ctx, cancel := c.createContext(ctx) + defer cancel() + resp, err := client.GetReplicationMessages(ctx, request, opts...) + if err != nil { + c.logger.Warn("Failed to get replication tasks from client", tag.Error(err)) + return + } + respChan <- resp + }(client, req) + } + + wg.Wait() + close(respChan) + + response := &replicator.GetReplicationMessagesResponse{MessagesByShard: make(map[int32]*replicator.ReplicationMessages)} + for resp := range respChan { + for shardID, tasks := range resp.MessagesByShard { + response.MessagesByShard[shardID] = tasks + } + } + + return response, nil +} + func (c *clientImpl) createContext(parent context.Context) (context.Context, context.CancelFunc) { if parent == nil { return context.WithTimeout(context.Background(), c.timeout) diff --git a/client/history/metricClient.go b/client/history/metricClient.go index 3722c6d8377..12f7315c666 100644 --- a/client/history/metricClient.go +++ b/client/history/metricClient.go @@ -24,6 +24,7 @@ import ( "context" h "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common/metrics" "go.uber.org/yarpc" @@ -469,3 +470,21 @@ func (c *metricClient) SyncActivity( return err } + +func (c *metricClient) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + c.metricsClient.IncCounter(metrics.HistoryClientGetReplicationTasksScope, metrics.CadenceClientRequests) + + sw := c.metricsClient.StartTimer(metrics.HistoryClientGetReplicationTasksScope, 
metrics.CadenceClientLatency) + resp, err := c.client.GetReplicationMessages(ctx, request, opts...) + sw.Stop() + + if err != nil { + c.metricsClient.IncCounter(metrics.HistoryClientGetReplicationTasksScope, metrics.CadenceClientFailures) + } + + return resp, err +} diff --git a/client/history/retryableClient.go b/client/history/retryableClient.go index 842283b6cb8..b4b2325edd6 100644 --- a/client/history/retryableClient.go +++ b/client/history/retryableClient.go @@ -24,6 +24,7 @@ import ( "context" h "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common/backoff" "go.uber.org/yarpc" @@ -405,3 +406,19 @@ func (c *retryableClient) SyncActivity( return backoff.Retry(op, c.policy, c.isRetryable) } + +func (c *retryableClient) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + var resp *replicator.GetReplicationMessagesResponse + op := func() error { + var err error + resp, err = c.client.GetReplicationMessages(ctx, request, opts...) 
+ return err + } + + err := backoff.Retry(op, c.policy, c.isRetryable) + return resp, err +} diff --git a/cmd/server/server.go b/cmd/server/server.go index 4b6da208320..7c58303d354 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -137,6 +137,7 @@ func (s *server) startService() common.Daemon { clusterMetadata.MasterClusterName, clusterMetadata.CurrentClusterName, clusterMetadata.ClusterInformation, + clusterMetadata.ReplicationConsumer, ) if s.cfg.PublicClient.HostPort != "" { diff --git a/common/cluster/metadata.go b/common/cluster/metadata.go index db2db77d003..a250907137c 100644 --- a/common/cluster/metadata.go +++ b/common/cluster/metadata.go @@ -49,6 +49,8 @@ type ( GetAllClusterInfo() map[string]config.ClusterInformation // ClusterNameForFailoverVersion return the corresponding cluster name for a given failover version ClusterNameForFailoverVersion(failoverVersion int64) string + // GetReplicationConsumerConfig returns the config for replication task consumer. + GetReplicationConsumerConfig() *config.ReplicationConsumerConfig } metadataImpl struct { @@ -67,6 +69,8 @@ type ( clusterInfo map[string]config.ClusterInformation // versionToClusterName contains all initial version -> corresponding cluster name versionToClusterName map[int64]string + //replicationConsumer returns the config for replication task consumer. 
+ replicationConsumer *config.ReplicationConsumerConfig } ) @@ -78,6 +82,7 @@ func NewMetadata( masterClusterName string, currentClusterName string, clusterInfo map[string]config.ClusterInformation, + replicationConsumer *config.ReplicationConsumerConfig, ) Metadata { if len(clusterInfo) == 0 { @@ -122,6 +127,7 @@ func NewMetadata( return &metadataImpl{ logger: logger, enableGlobalDomain: enableGlobalDomain, + replicationConsumer: replicationConsumer, failoverVersionIncrement: failoverVersionIncrement, masterClusterName: masterClusterName, currentClusterName: currentClusterName, @@ -195,3 +201,11 @@ func (metadata *metadataImpl) ClusterNameForFailoverVersion(failoverVersion int6 } return clusterName } + +func (metadata *metadataImpl) GetReplicationConsumerConfig() *config.ReplicationConsumerConfig { + if metadata.replicationConsumer == nil { + return &config.ReplicationConsumerConfig{Type: config.ReplicationConsumerTypeKafka} + } + + return metadata.replicationConsumer +} diff --git a/common/cluster/metadataTestBase.go b/common/cluster/metadataTestBase.go index 03ee9cd6813..5812968763b 100644 --- a/common/cluster/metadataTestBase.go +++ b/common/cluster/metadataTestBase.go @@ -91,6 +91,15 @@ func GetTestClusterMetadata(enableGlobalDomain bool, isMasterCluster bool) Metad masterClusterName, TestCurrentClusterName, TestAllClusterInfo, + &config.ReplicationConsumerConfig{ + Type: config.ReplicationConsumerTypeRPC, + FetcherConfig: &config.FetcherConfig{ + RPCParallelism: 1, + AggregationIntervalSecs: 2, + ErrorRetryWaitSecs: 1, + TimerJitterCoefficient: 0.15, + }, + }, ) } @@ -101,5 +110,6 @@ func GetTestClusterMetadata(enableGlobalDomain bool, isMasterCluster bool) Metad TestCurrentClusterName, TestCurrentClusterName, TestSingleDCClusterInfo, + nil, ) } diff --git a/common/daemon.go b/common/daemon.go index c3a0156ade1..e5e749e8dfe 100644 --- a/common/daemon.go +++ b/common/daemon.go @@ -33,7 +33,7 @@ const ( type ( // Daemon is the base interfaces implemented by - 
// background tasks within cherami + // background tasks within Cadence Daemon interface { Start() Stop() diff --git a/common/metrics/defs.go b/common/metrics/defs.go index baf5dd0e751..db1a51ac25d 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -279,6 +279,8 @@ const ( HistoryClientSyncShardStatusScope // HistoryClientSyncActivityScope tracks RPC calls to history service HistoryClientSyncActivityScope + // HistoryClientGetReplicationTasksScope tracks RPC calls to history service + HistoryClientGetReplicationTasksScope // MatchingClientPollForDecisionTaskScope tracks RPC calls to matching service MatchingClientPollForDecisionTaskScope // MatchingClientPollForActivityTaskScope tracks RPC calls to matching service @@ -365,6 +367,8 @@ const ( FrontendClientCountWorkflowExecutionsScope // FrontendClientGetSearchAttributesScope tracks RPC calls to frontend service FrontendClientGetSearchAttributesScope + // FrontendClientGetReplicationTasksScope tracks RPC calls to frontend service + FrontendClientGetReplicationTasksScope // AdminClientAddSearchAttributeScope tracks RPC calls to admin service AdminClientAddSearchAttributeScope // AdminClientDescribeHistoryHostScope tracks RPC calls to admin service @@ -618,6 +622,8 @@ const ( FrontendResetWorkflowExecutionScope // FrontendGetSearchAttributesScope is the metric scope for frontend.GetSearchAttributes FrontendGetSearchAttributesScope + // FrontendGetReplicationTasksScope is the metric scope for frontend.GetReplicationTasks + FrontendGetReplicationTasksScope NumFrontendScopes ) @@ -796,6 +802,8 @@ const ( WorkflowCompletionStatsScope // ArchiverClientScope is scope used by all metrics emitted by archiver.Client ArchiverClientScope + // ReplicationTaskFetcherScope is scope used by all metrics emitted by ReplicationTaskFetcher + ReplicationTaskFetcherScope NumHistoryScopes ) @@ -949,6 +957,7 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ HistoryClientReplicateRawEventsScope: {operation: 
"HistoryClientReplicateRawEvents", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, HistoryClientSyncShardStatusScope: {operation: "HistoryClientSyncShardStatusScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, HistoryClientSyncActivityScope: {operation: "HistoryClientSyncActivityScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, + HistoryClientGetReplicationTasksScope: {operation: "HistoryClientGetReplicationTasksScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, MatchingClientPollForDecisionTaskScope: {operation: "MatchingClientPollForDecisionTask", tags: map[string]string{CadenceRoleTagName: MatchingRoleTagValue}}, MatchingClientPollForActivityTaskScope: {operation: "MatchingClientPollForActivityTask", tags: map[string]string{CadenceRoleTagName: MatchingRoleTagValue}}, MatchingClientAddActivityTaskScope: {operation: "MatchingClientAddActivityTask", tags: map[string]string{CadenceRoleTagName: MatchingRoleTagValue}}, @@ -992,6 +1001,7 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ FrontendClientScanWorkflowExecutionsScope: {operation: "FrontendClientScanWorkflowExecutions", tags: map[string]string{CadenceRoleTagName: FrontendRoleTagValue}}, FrontendClientCountWorkflowExecutionsScope: {operation: "FrontendClientCountWorkflowExecutions", tags: map[string]string{CadenceRoleTagName: FrontendRoleTagValue}}, FrontendClientGetSearchAttributesScope: {operation: "FrontendClientGetSearchAttributes", tags: map[string]string{CadenceRoleTagName: FrontendRoleTagValue}}, + FrontendClientGetReplicationTasksScope: {operation: "FrontendClientGetReplicationTasksScope", tags: map[string]string{CadenceRoleTagName: FrontendRoleTagValue}}, AdminClientAddSearchAttributeScope: {operation: "AdminClientAddSearchAttribute", tags: map[string]string{CadenceRoleTagName: AdminRoleTagValue}}, AdminClientDescribeHistoryHostScope: {operation: "AdminClientDescribeHistoryHost", tags: 
map[string]string{CadenceRoleTagName: AdminRoleTagValue}}, AdminClientDescribeWorkflowExecutionScope: {operation: "AdminClientDescribeWorkflowExecution", tags: map[string]string{CadenceRoleTagName: AdminRoleTagValue}}, @@ -1110,6 +1120,7 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ FrontendDescribeTaskListScope: {operation: "DescribeTaskList"}, FrontendResetStickyTaskListScope: {operation: "ResetStickyTaskList"}, FrontendGetSearchAttributesScope: {operation: "GetSearchAttributes"}, + FrontendGetReplicationTasksScope: {operation: "GetReplicationTasks"}, }, // History Scope Names History: { @@ -1199,6 +1210,7 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ SessionCountStatsScope: {operation: "SessionStats", tags: map[string]string{StatsTypeTagName: CountStatsTypeTagValue}}, WorkflowCompletionStatsScope: {operation: "CompletionStats", tags: map[string]string{StatsTypeTagName: CountStatsTypeTagValue}}, ArchiverClientScope: {operation: "ArchiverClient"}, + ReplicationTaskFetcherScope: {operation: "ReplicationTaskFetcher"}, }, // Matching Scope Names Matching: { @@ -1435,6 +1447,10 @@ const ( ArchiverClientSendSignalFailureCount ArchiverClientInlineArchiveAttemptCount ArchiverClientInlineArchiveFailureCount + LastRetrievedMessageID + LastProcessedMessageID + ReplicationTasksApplied + ReplicationTasksFailed NumHistoryMetrics ) @@ -1711,6 +1727,10 @@ var MetricDefs = map[ServiceIdx]map[int]metricDefinition{ ArchiverClientSendSignalFailureCount: {metricName: "archiver_client_send_signal_error", metricType: Counter}, ArchiverClientInlineArchiveAttemptCount: {metricName: "archiver_client_inline_archive_attempt", metricType: Counter}, ArchiverClientInlineArchiveFailureCount: {metricName: "archiver_client_inline_archive_failure", metricType: Counter}, + LastRetrievedMessageID: {metricName: "last_retrieved_message_id", metricType: Gauge}, + LastProcessedMessageID: {metricName: "last_processed_message_id", metricType: Gauge}, + ReplicationTasksApplied: 
{metricName: "replication_tasks_applied", metricType: Counter}, + ReplicationTasksFailed: {metricName: "replication_tasks_failed", metricType: Counter}, }, Matching: { PollSuccessCounter: {metricName: "poll_success"}, diff --git a/common/mocks/ClusterMetadata.go b/common/mocks/ClusterMetadata.go index 5197b946043..76cab8c10f8 100644 --- a/common/mocks/ClusterMetadata.go +++ b/common/mocks/ClusterMetadata.go @@ -157,3 +157,17 @@ func (_m *ClusterMetadata) IsMasterCluster() bool { return r0 } + +// GetReplicationConsumerConfig provides a mock function with given fields: +func (_m *ClusterMetadata) GetReplicationConsumerConfig() *config.ReplicationConsumerConfig { + ret := _m.Called() + + var r0 *config.ReplicationConsumerConfig + if rf, ok := ret.Get(0).(func() *config.ReplicationConsumerConfig); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(*config.ReplicationConsumerConfig) + } + + return r0 +} \ No newline at end of file diff --git a/common/mocks/FrontendClient.go b/common/mocks/FrontendClient.go index 9c7ef195de2..45acaf39058 100644 --- a/common/mocks/FrontendClient.go +++ b/common/mocks/FrontendClient.go @@ -25,6 +25,7 @@ package mocks import ( context "context" + "github.com/uber/cadence/.gen/go/replicator" mock "github.com/stretchr/testify/mock" "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" shared "github.com/uber/cadence/.gen/go/shared" @@ -970,3 +971,33 @@ func (_m *FrontendClient) UpdateDomain(ctx context.Context, UpdateRequest *share return r0, r1 } + +// GetReplicationMessages provides a mock function with given fields: ctx, request, opts +func (_m *FrontendClient) GetReplicationMessages(ctx context.Context, request *replicator.GetReplicationMessagesRequest, opts ...yarpc.CallOption) (*replicator.GetReplicationMessagesResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, request) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + var r0 *replicator.GetReplicationMessagesResponse + if rf, ok := ret.Get(0).(func(context.Context, *replicator.GetReplicationMessagesRequest, ...yarpc.CallOption) *replicator.GetReplicationMessagesResponse); ok { + r0 = rf(ctx, request, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*replicator.GetReplicationMessagesResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *replicator.GetReplicationMessagesRequest, ...yarpc.CallOption) error); ok { + r1 = rf(ctx, request, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/common/mocks/HistoryClient.go b/common/mocks/HistoryClient.go index d9946861c27..ff9dd94367d 100644 --- a/common/mocks/HistoryClient.go +++ b/common/mocks/HistoryClient.go @@ -23,8 +23,8 @@ package mocks import ( "context" + "github.com/uber/cadence/.gen/go/replicator" "go.uber.org/yarpc" - "github.com/stretchr/testify/mock" "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/history/historyserviceclient" @@ -511,3 +511,30 @@ func (_m *HistoryClient) SyncActivity(ctx context.Context, request *history.Sync return r0 } + +// GetReplicationMessages provides a mock function with given fields: ctx, request +func (_m *HistoryClient) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.GetReplicationMessagesResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *replicator.GetReplicationMessagesResponse + if rf, ok := ret.Get(0).(func(context.Context, *replicator.GetReplicationMessagesRequest) *replicator.GetReplicationMessagesResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*replicator.GetReplicationMessagesResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *replicator.GetReplicationMessagesRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return 
r0, r1 +} diff --git a/common/persistence/cassandra/cassandraPersistence.go b/common/persistence/cassandra/cassandraPersistence.go index dd5b36038da..893c6d91358 100644 --- a/common/persistence/cassandra/cassandraPersistence.go +++ b/common/persistence/cassandra/cassandraPersistence.go @@ -107,7 +107,8 @@ const ( `timer_ack_level: ?, ` + `cluster_transfer_ack_level: ?, ` + `cluster_timer_ack_level: ?, ` + - `domain_notification_version: ? ` + + `domain_notification_version: ?, ` + + `cluster_replication_level: ? ` + `}` templateWorkflowExecutionType = `{` + @@ -915,6 +916,7 @@ func (d *cassandraPersistence) CreateShard(request *p.CreateShardRequest) error shardInfo.ClusterTransferAckLevel, shardInfo.ClusterTimerAckLevel, shardInfo.DomainNotificationVersion, + shardInfo.ClusterReplicationLevel, shardInfo.RangeID) previous := make(map[string]interface{}) @@ -990,6 +992,7 @@ func (d *cassandraPersistence) UpdateShard(request *p.UpdateShardRequest) error shardInfo.ClusterTransferAckLevel, shardInfo.ClusterTimerAckLevel, shardInfo.DomainNotificationVersion, + shardInfo.ClusterReplicationLevel, shardInfo.RangeID, shardInfo.ShardID, rowTypeShard, diff --git a/common/persistence/cassandra/cassandraPersistenceUtil.go b/common/persistence/cassandra/cassandraPersistenceUtil.go index 43eca928884..b814c9eb760 100644 --- a/common/persistence/cassandra/cassandraPersistenceUtil.go +++ b/common/persistence/cassandra/cassandraPersistenceUtil.go @@ -1619,6 +1619,8 @@ func createShardInfo( info.ClusterTimerAckLevel = v.(map[string]time.Time) case "domain_notification_version": info.DomainNotificationVersion = v.(int64) + case "cluster_replication_level": + info.ClusterReplicationLevel = v.(map[string]int64) } } @@ -1632,6 +1634,9 @@ func createShardInfo( currentCluster: info.TimerAckLevel, } } + if info.ClusterReplicationLevel == nil { + info.ClusterReplicationLevel = make(map[string]int64) + } return info } diff --git a/common/persistence/dataInterfaces.go 
b/common/persistence/dataInterfaces.go index 55a56147422..33cf992afcf 100644 --- a/common/persistence/dataInterfaces.go +++ b/common/persistence/dataInterfaces.go @@ -203,6 +203,7 @@ type ( ClusterTimerAckLevel map[string]time.Time TransferFailoverLevels map[string]TransferFailoverLevel // uuid -> TransferFailoverLevel TimerFailoverLevels map[string]TimerFailoverLevel // uuid -> TimerFailoverLevel + ClusterReplicationLevel map[string]int64 // cluster -> last replicated taskID DomainNotificationVersion int64 } diff --git a/common/persistence/persistence-tests/executionManagerTest.go b/common/persistence/persistence-tests/executionManagerTest.go index 643860af6c8..34a554775dd 100644 --- a/common/persistence/persistence-tests/executionManagerTest.go +++ b/common/persistence/persistence-tests/executionManagerTest.go @@ -3687,14 +3687,15 @@ func (s *ExecutionManagerSuite) TestCreateGetShardBackfill() { currentClusterTransferAck := int64(21) currentClusterTimerAck := timestampConvertor(time.Now().Add(-10 * time.Second)) shardInfo := &p.ShardInfo{ - ShardID: shardID, - Owner: "some random owner", - RangeID: rangeID, - StolenSinceRenew: 12, - UpdatedAt: timestampConvertor(time.Now()), - ReplicationAckLevel: currentReplicationAck, - TransferAckLevel: currentClusterTransferAck, - TimerAckLevel: currentClusterTimerAck, + ShardID: shardID, + Owner: "some random owner", + RangeID: rangeID, + StolenSinceRenew: 12, + UpdatedAt: timestampConvertor(time.Now()), + ReplicationAckLevel: currentReplicationAck, + TransferAckLevel: currentClusterTransferAck, + TimerAckLevel: currentClusterTimerAck, + ClusterReplicationLevel: map[string]int64{}, } createRequest := &p.CreateShardRequest{ ShardInfo: shardInfo, @@ -3749,6 +3750,7 @@ func (s *ExecutionManagerSuite) TestCreateGetUpdateGetShard() { cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, DomainNotificationVersion: domainNotificationVersion, + ClusterReplicationLevel: map[string]int64{}, } createRequest := 
&p.CreateShardRequest{ ShardInfo: shardInfo, @@ -3790,6 +3792,7 @@ func (s *ExecutionManagerSuite) TestCreateGetUpdateGetShard() { cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, DomainNotificationVersion: domainNotificationVersion, + ClusterReplicationLevel: map[string]int64{cluster.TestAlternativeClusterName: 12345}, } updateRequest := &p.UpdateShardRequest{ ShardInfo: shardInfo, diff --git a/common/persistence/sql/sqlShardManager.go b/common/persistence/sql/sqlShardManager.go index 935dd861a5d..e0ebe3f2730 100644 --- a/common/persistence/sql/sqlShardManager.go +++ b/common/persistence/sql/sqlShardManager.go @@ -109,6 +109,10 @@ func (m *sqlShardManager) GetShard(request *persistence.GetShardRequest) (*persi } } + if shardInfo.ClusterReplicationLevel == nil { + shardInfo.ClusterReplicationLevel = make(map[string]int64) + } + resp := &persistence.GetShardResponse{ShardInfo: &persistence.ShardInfo{ ShardID: int(row.ShardID), RangeID: row.RangeID, @@ -121,6 +125,7 @@ func (m *sqlShardManager) GetShard(request *persistence.GetShardRequest) (*persi ClusterTransferAckLevel: shardInfo.ClusterTransferAckLevel, ClusterTimerAckLevel: timerAckLevel, DomainNotificationVersion: shardInfo.GetDomainNotificationVersion(), + ClusterReplicationLevel: shardInfo.ClusterReplicationLevel, }} return resp, nil @@ -215,6 +220,7 @@ func shardInfoToShardsRow(s persistence.ShardInfo) (*sqldb.ShardsRow, error) { ClusterTimerAckLevel: timerAckLevels, DomainNotificationVersion: common.Int64Ptr(s.DomainNotificationVersion), Owner: &s.Owner, + ClusterReplicationLevel: s.ClusterReplicationLevel, } blob, err := shardInfoToBlob(shardInfo) diff --git a/common/service/config/config.go b/common/service/config/config.go index de0e071b270..71d86b578a4 100644 --- a/common/service/config/config.go +++ b/common/service/config/config.go @@ -32,6 +32,13 @@ import ( "github.com/uber/ringpop-go/discovery" ) +const ( + // ReplicationConsumerTypeKafka means consuming replication tasks from 
kafka. + ReplicationConsumerTypeKafka = "kafka" + // ReplicationConsumerTypeRPC means pulling source DC for replication tasks. + ReplicationConsumerTypeRPC = "rpc" +) + type ( // Config contains the configuration for a set of cadence services Config struct { @@ -42,7 +49,7 @@ type ( // Log is the logging config Log Logger `yaml:"log"` // ClusterMetadata is the config containing all valid clusters and active cluster - ClusterMetadata ClusterMetadata `yaml:"clusterMetadata"` + ClusterMetadata *ClusterMetadata `yaml:"clusterMetadata"` // DCRedirectionPolicy contains the frontend datacenter redirection policy DCRedirectionPolicy DCRedirectionPolicy `yaml:"dcRedirectionPolicy"` // Services is a map of service name to service config items @@ -225,6 +232,8 @@ type ( // ClusterMetadata contains the all cluster which participated in cross DC ClusterMetadata struct { EnableGlobalDomain bool `yaml:"enableGlobalDomain"` + // ReplicationConsumerConfig determines how we consume replication tasks. + ReplicationConsumer *ReplicationConsumerConfig `yaml:"replicationConsumer"` // FailoverVersionIncrement is the increment of each cluster version when failover happens FailoverVersionIncrement int64 `yaml:"failoverVersionIncrement"` // MasterClusterName is the master cluster name, only the master cluster can register / update domain @@ -246,6 +255,22 @@ type ( RPCAddress string `yaml:"rpcAddress"` } + // ReplicationConsumerConfig contains config for replication consumer + ReplicationConsumerConfig struct { + // Type determines how we consume replication tasks. It can be either kafka(default) or rpc. + Type string `yaml:"type"` + // FetcherConfig is the config for replication task fetcher. + FetcherConfig *FetcherConfig `yaml:"fetcher"` + } + + // FetcherConfig is the config for replication task fetcher. 
+ FetcherConfig struct { + RPCParallelism int `yaml:"rpcParallelism"` + AggregationIntervalSecs int `yaml:"aggregationIntervalSecs"` + ErrorRetryWaitSecs int `yaml:"errorRetryWaitSecs"` + TimerJitterCoefficient float64 `yaml:"timerJitterCoefficient"` + } + // DCRedirectionPolicy contains the frontend datacenter redirection policy DCRedirectionPolicy struct { Policy string `yaml:"policy"` diff --git a/common/service/service.go b/common/service/service.go index 013cc740b4e..bde5196b0e8 100644 --- a/common/service/service.go +++ b/common/service/service.go @@ -203,7 +203,7 @@ func (h *serviceImpl) Start() { h.hostInfo = hostInfo h.clientBean, err = client.NewClientBean( - client.NewRPCClientFactory(h.rpcFactory, h.membershipMonitor, h.metricsClient, h.dynamicCollection, h.numberOfHistoryShards), + client.NewRPCClientFactory(h.rpcFactory, h.membershipMonitor, h.metricsClient, h.dynamicCollection, h.numberOfHistoryShards, h.logger), h.dispatcherProvider, h.clusterMetadata, ) diff --git a/config/development_active.yaml b/config/development_active.yaml index 4e8872339f9..7ce4079e2cc 100644 --- a/config/development_active.yaml +++ b/config/development_active.yaml @@ -1,7 +1,7 @@ persistence: defaultStore: cass-default visibilityStore: cass-visibility - numHistoryShards: 1 + numHistoryShards: 4 datastores: cass-default: cassandra: @@ -67,6 +67,13 @@ services: clusterMetadata: enableGlobalDomain: true + replicationConsumer: + type: rpc + fetcher: + rpcParallelism: 1 + aggregationIntervalSecs: 2 + errorRetryWaitSecs: 1 + timerJitterCoefficient: 0.15 failoverVersionIncrement: 10 masterClusterName: "active" currentClusterName: "active" diff --git a/config/development_standby.yaml b/config/development_standby.yaml index ee19d2bde67..9e8626ac0de 100644 --- a/config/development_standby.yaml +++ b/config/development_standby.yaml @@ -1,7 +1,7 @@ persistence: defaultStore: cass-default visibilityStore: cass-visibility - numHistoryShards: 1 + numHistoryShards: 4 datastores: 
cass-default: cassandra: @@ -67,6 +67,13 @@ services: clusterMetadata: enableGlobalDomain: true + replicationConsumer: + type: rpc + fetcher: + rpcParallelism: 1 + aggregationIntervalSecs: 2 + errorRetryWaitSecs: 1 + timerJitterCoefficient: 0.15 failoverVersionIncrement: 10 masterClusterName: "active" currentClusterName: "standby" diff --git a/host/testcluster.go b/host/testcluster.go index 26063f528f2..553498d20fd 100644 --- a/host/testcluster.go +++ b/host/testcluster.go @@ -107,6 +107,7 @@ func NewCluster(options *TestClusterConfig, logger log.Logger) (*TestCluster, er options.ClusterMetadata.MasterClusterName, options.ClusterMetadata.CurrentClusterName, options.ClusterMetadata.ClusterInformation, + options.ClusterMetadata.ReplicationConsumer, ) } diff --git a/host/testdata/xdc_integration_test_clusters.yaml b/host/testdata/xdc_integration_test_clusters.yaml index bc04fc25047..5f20cfd0af2 100644 --- a/host/testdata/xdc_integration_test_clusters.yaml +++ b/host/testdata/xdc_integration_test_clusters.yaml @@ -2,6 +2,13 @@ dbname: integration_active clustermetadata: enableGlobalDomain: true + replicationConsumer: + type: rpc + fetcher: + rpcParallelism: 1 + aggregationIntervalSecs: 2 + errorRetryWaitSecs: 1 + timerJitterCoefficient: 0.15 failoverVersionIncrement: 10 masterClusterName: "active" currentClusterName: "active" @@ -54,6 +61,13 @@ dbname: integration_standby clustermetadata: enableGlobalDomain: true + replicationConsumer: + type: rpc + fetcher: + rpcParallelism: 1 + aggregationIntervalSecs: 2 + errorRetryWaitSecs: 1 + timerJitterCoefficient: 0.15 failoverVersionIncrement: 10 masterClusterName: "active" currentClusterName: "standby" diff --git a/host/xdc/integration_failover_test.go b/host/xdc/integration_failover_test.go index 44f1f84ef40..7df0e66aec8 100644 --- a/host/xdc/integration_failover_test.go +++ b/host/xdc/integration_failover_test.go @@ -387,6 +387,9 @@ func (s *integrationClustersTestSuite) TestSimpleWorkflowFailover() { queryResultString := 
string(queryResult.Resp.QueryResult) s.Equal("query-result", queryResultString) + // Wait a while so the events are replicated. + time.Sleep(5 * time.Second) + // call QueryWorkflow in separate goroutinue (because it is blocking). That will generate a query task go queryWorkflowFn(client2, queryType) // process that query task, which should respond via RespondQueryTaskCompleted @@ -771,7 +774,7 @@ func (s *integrationClustersTestSuite) TestStartWorkflowExecution_Failover_Workf T: s.T(), } - // make some progress in cluster 1 + // Complete the workflow in cluster 1 _, err = poller.PollAndProcessDecisionTask(false, false) s.logger.Info("PollAndProcessDecisionTask", tag.Error(err)) s.Nil(err) @@ -793,14 +796,14 @@ func (s *integrationClustersTestSuite) TestStartWorkflowExecution_Failover_Workf // wait till failover completed time.Sleep(cacheRefreshInterval) - // start the workflow in cluster 2 with ID reuse policy being allow if last run fails + // start the same workflow in cluster 2 is not allowed if policy is AllowDuplicateFailedOnly startReq.RequestId = common.StringPtr(uuid.New()) startReq.WorkflowIdReusePolicy = workflow.WorkflowIdReusePolicyAllowDuplicateFailedOnly.Ptr() we, err = client2.StartWorkflowExecution(createContext(), startReq) s.IsType(&workflow.WorkflowExecutionAlreadyStartedError{}, err) s.Nil(we) - // start the workflow in cluster 2 with ID reuse policy being reject ID reuse + // start the same workflow in cluster 2 is not allowed if policy is RejectDuplicate startReq.RequestId = common.StringPtr(uuid.New()) startReq.WorkflowIdReusePolicy = workflow.WorkflowIdReusePolicyRejectDuplicate.Ptr() we, err = client2.StartWorkflowExecution(createContext(), startReq) diff --git a/idl/github.com/uber/cadence/cadence.thrift b/idl/github.com/uber/cadence/cadence.thrift index 15aab832d12..4b068c1b823 100644 --- a/idl/github.com/uber/cadence/cadence.thrift +++ b/idl/github.com/uber/cadence/cadence.thrift @@ -19,6 +19,7 @@ // THE SOFTWARE. 
include "shared.thrift" +include "replicator.thrift" namespace java com.uber.cadence @@ -577,4 +578,12 @@ service WorkflowService { 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError, ) + replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request) + throws ( + 1: shared.BadRequestError badRequestError, + 2: shared.InternalServiceError internalServiceError, + 3: shared.LimitExceededError limitExceededError, + 4: shared.ServiceBusyError serviceBusyError, + 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError, + ) } diff --git a/idl/github.com/uber/cadence/history.thrift b/idl/github.com/uber/cadence/history.thrift index d3a2b0cb6fa..4727bef1f4a 100644 --- a/idl/github.com/uber/cadence/history.thrift +++ b/idl/github.com/uber/cadence/history.thrift @@ -19,6 +19,7 @@ // THE SOFTWARE. include "shared.thrift" +include "replicator.thrift" namespace java com.uber.cadence.history @@ -690,4 +691,13 @@ service HistoryService { 2: shared.InternalServiceError internalServiceError, 3: shared.AccessDeniedError accessDeniedError, ) + + replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request) + throws ( + 1: shared.BadRequestError badRequestError, + 2: shared.InternalServiceError internalServiceError, + 3: shared.LimitExceededError limitExceededError, + 4: shared.ServiceBusyError serviceBusyError, + 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError, + ) } diff --git a/idl/github.com/uber/cadence/replicator.thrift b/idl/github.com/uber/cadence/replicator.thrift index 9b90b49fa68..310f180aefc 100644 --- a/idl/github.com/uber/cadence/replicator.thrift +++ b/idl/github.com/uber/cadence/replicator.thrift @@ -21,7 +21,6 @@ namespace java com.uber.cadence.replicator include "shared.thrift" -include "history.thrift" enum ReplicationTaskType { Domain @@ -95,6 +94,7 @@ struct SyncActicvityTaskAttributes { struct 
ReplicationTask {
+  11: optional i64 (js.type = "Long") sourceTaskId
   20: optional DomainTaskAttributes domainTaskAttributes
   30: optional HistoryTaskAttributes historyTaskAttributes
   40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes
@@ -102,3 +102,26 @@ struct ReplicationTask {
   60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes
 }
+struct ReplicationToken {
+  10: optional i32 shardID
+  // lastRetrivedMessageId is where the next fetch should begin with
+  20: optional i64 (js.type = "Long") lastRetrivedMessageId
+  // lastProcessedMessageId is the last messageId that is processed on the passive side.
+  // This can be different than lastRetrivedMessageId if passive side supports prefetching messages.
+  30: optional i64 (js.type = "Long") lastProcessedMessageId
+}
+
+struct ReplicationMessages {
+  10: optional list<ReplicationTask> replicationTasks
+  // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows).
+  20: optional i64 (js.type = "Long") lastRetrivedMessageId
+  30: optional bool hasMore // Hint for flow control
+}
+
+struct GetReplicationMessagesRequest {
+  10: optional list<ReplicationToken> tokens
+}
+
+struct GetReplicationMessagesResponse {
+  10: optional map<i32, ReplicationMessages> messagesByShard
+}
\ No newline at end of file
diff --git a/idl/github.com/uber/cadence/sqlblobs.thrift b/idl/github.com/uber/cadence/sqlblobs.thrift
index f8596c6b9b7..74910501a2d 100644
--- a/idl/github.com/uber/cadence/sqlblobs.thrift
+++ b/idl/github.com/uber/cadence/sqlblobs.thrift
@@ -32,6 +32,7 @@ struct ShardInfo {
   34: optional map<string, i64> clusterTransferAckLevel
   36: optional map<string, i64> clusterTimerAckLevel
   38: optional string owner
+  40: optional map<string, i64> clusterReplicationLevel
 }

 struct DomainInfo {
diff --git a/schema/cassandra/cadence/schema.cql b/schema/cassandra/cadence/schema.cql
index b858de39e11..d54eec9a187 100644
--- a/schema/cassandra/cadence/schema.cql
+++ b/schema/cassandra/cadence/schema.cql
@@ -15,6 +15,8 @@ CREATE TYPE shard (
   -- Mapping of cluster to corresponding timer ack level
   cluster_timer_ack_level map<text, bigint>,
   domain_notification_version bigint, -- the global domain change version this shard is aware of
+  -- Mapping of (remote) cluster to corresponding replication level (last replicated task_id)
+  cluster_replication_level map<text, bigint>,
 );

 --- Workflow execution and mutable state ---
diff --git a/schema/cassandra/cadence/versioned/v0.22/cluster_replication_level.cql b/schema/cassandra/cadence/versioned/v0.22/cluster_replication_level.cql
new file mode 100644
index 00000000000..a0a4722e8d7
--- /dev/null
+++ b/schema/cassandra/cadence/versioned/v0.22/cluster_replication_level.cql
@@ -0,0 +1 @@
+ALTER TYPE shard ADD cluster_replication_level map<text, bigint>;
\ No newline at end of file
diff --git a/schema/cassandra/cadence/versioned/v0.22/manifest.json b/schema/cassandra/cadence/versioned/v0.22/manifest.json
new file mode 100644
index 00000000000..0d8ab1931ad
--- /dev/null
+++ b/schema/cassandra/cadence/versioned/v0.22/manifest.json
@@
-0,0 +1,8 @@ +{ + "CurrVersion": "0.22", + "MinCompatibleVersion": "0.22", + "Description": "Add per cluster replication level (last replicated task_id) to shard info", + "SchemaUpdateCqlFiles": [ + "cluster_replication_level.cql" + ] +} \ No newline at end of file diff --git a/service/frontend/dcRedirectionHandler.go b/service/frontend/dcRedirectionHandler.go index ba41915fe76..49ac4e9198e 100644 --- a/service/frontend/dcRedirectionHandler.go +++ b/service/frontend/dcRedirectionHandler.go @@ -27,6 +27,7 @@ import ( "github.com/uber/cadence/.gen/go/cadence/workflowserviceserver" "github.com/uber/cadence/.gen/go/health" "github.com/uber/cadence/.gen/go/health/metaserver" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/client" "github.com/uber/cadence/common" @@ -38,6 +39,8 @@ import ( "github.com/uber/cadence/common/service/config" ) +var _ workflowserviceserver.Interface = (*DCRedirectionHandlerImpl)(nil) + type ( clientBeanProvider func() client.Bean @@ -1108,6 +1111,14 @@ func (handler *DCRedirectionHandlerImpl) TerminateWorkflowExecution( return err } +// GetReplicationMessages API call +func (handler *DCRedirectionHandlerImpl) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, +) (*replicator.GetReplicationMessagesResponse, error) { + return handler.frontendHandler.GetReplicationMessages(ctx, request) +} + func (handler *DCRedirectionHandlerImpl) beforeCall( scope int, ) (metrics.Scope, time.Time) { diff --git a/service/frontend/workflowHandler.go b/service/frontend/workflowHandler.go index 740596b598c..d51a918e81b 100644 --- a/service/frontend/workflowHandler.go +++ b/service/frontend/workflowHandler.go @@ -33,6 +33,7 @@ import ( "github.com/uber/cadence/.gen/go/health/metaserver" h "github.com/uber/cadence/.gen/go/history" m "github.com/uber/cadence/.gen/go/matching" + "github.com/uber/cadence/.gen/go/replicator" 
"github.com/uber/cadence/.gen/go/shared" gen "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/client/history" @@ -3279,3 +3280,28 @@ func (wh *WorkflowHandler) allow(d domainGetter) bool { } return wh.rateLimiter.Allow(quotas.Info{Domain: domain}) } + +// GetReplicationMessages returns new replication tasks since the read level provided in the token. +func (wh *WorkflowHandler) GetReplicationMessages( + ctx context.Context, + request *replicator.GetReplicationMessagesRequest, +) (resp *replicator.GetReplicationMessagesResponse, err error) { + defer log.CapturePanic(wh.GetLogger(), &err) + + scope, sw := wh.startRequestProfile(metrics.FrontendGetReplicationTasksScope) + defer sw.Stop() + + if err := wh.versionChecker.checkClientVersion(ctx); err != nil { + return nil, wh.error(err, scope) + } + + if request == nil { + return nil, wh.error(errRequestNotSet, scope) + } + + resp, err = wh.history.GetReplicationMessages(ctx, request) + if err != nil { + return nil, wh.error(err, scope) + } + return resp, nil +} diff --git a/service/frontend/workflowHandler_mock.go b/service/frontend/workflowHandler_mock.go index a46e0d3fd91..342aeccdffe 100644 --- a/service/frontend/workflowHandler_mock.go +++ b/service/frontend/workflowHandler_mock.go @@ -24,6 +24,7 @@ import ( "context" "github.com/stretchr/testify/mock" + "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" ) @@ -719,3 +720,26 @@ func (_m *MockWorkflowHandler) UpdateDomain(ctx context.Context, UpdateRequest * return r0, r1 } + +// GetReplicationMessages provides a mock function with given fields: ctx, Request +func (_m *MockWorkflowHandler) GetReplicationMessages(ctx context.Context, Request *replicator.GetReplicationMessagesRequest) (*replicator.GetReplicationMessagesResponse, error) { + ret := _m.Called(ctx, Request) + + var r0 *replicator.GetReplicationMessagesResponse + if rf, ok := ret.Get(0).(func(context.Context, *replicator.GetReplicationMessagesRequest) 
*replicator.GetReplicationMessagesResponse); ok {
+		r0 = rf(ctx, Request)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*replicator.GetReplicationMessagesResponse)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *replicator.GetReplicationMessagesRequest) error); ok {
+		r1 = rf(ctx, Request)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
diff --git a/service/history/MockHistoryEngine.go b/service/history/MockHistoryEngine.go
index 20d148f7fec..720ab5ae6ea 100644
--- a/service/history/MockHistoryEngine.go
+++ b/service/history/MockHistoryEngine.go
@@ -25,6 +25,7 @@ import (
 	"github.com/stretchr/testify/mock"

 	gohistory "github.com/uber/cadence/.gen/go/history"
+	"github.com/uber/cadence/.gen/go/replicator"
 	"github.com/uber/cadence/.gen/go/shared"
 	"github.com/uber/cadence/common/persistence"
 )
@@ -515,3 +516,26 @@ func (_m *MockHistoryEngine) NotifyNewReplicationTasks(tasks []persistence.Task)
 func (_m *MockHistoryEngine) NotifyNewTimerTasks(tasks []persistence.Task) {
 	_m.Called(tasks)
 }
+
+// GetReplicationMessages is mock implementation for GetReplicationMessages of HistoryEngine.
+// It follows the standard mockery pattern: return index 0 is the response, index 1 is the error,
+// and a registered func override receives the full (ctx, taskID) argument list.
+func (_m *MockHistoryEngine) GetReplicationMessages(ctx context.Context, taskID int64) (*replicator.ReplicationMessages, error) {
+	ret := _m.Called(ctx, taskID)
+
+	var r0 *replicator.ReplicationMessages
+	if rf, ok := ret.Get(0).(func(context.Context, int64) *replicator.ReplicationMessages); ok {
+		r0 = rf(ctx, taskID)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*replicator.ReplicationMessages)
+		}
+	}
+
+	var r1 error
+	// The error is return index 1, not 0; reading index 0 here would panic on the
+	// response value and the func assertion must include context.Context to match callers.
+	if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+		r1 = rf(ctx, taskID)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
diff --git a/service/history/conflictResolver_test.go b/service/history/conflictResolver_test.go
index a276147f999..8bff3cbc7dc 100644
--- a/service/history/conflictResolver_test.go
+++ b/service/history/conflictResolver_test.go
@@ -24,11 +24,11 @@ import (
 	"testing"
 	"time"

+	"github.com/golang/mock/gomock"
"github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/uber-go/tally" - "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/client" "github.com/uber/cadence/common" @@ -49,6 +49,7 @@ type ( conflictResolverSuite struct { suite.Suite logger log.Logger + mockCtrl *gomock.Controller mockExecutionMgr *mocks.ExecutionManager mockHistoryMgr *mocks.HistoryManager mockHistoryV2Mgr *mocks.HistoryV2Manager @@ -64,7 +65,7 @@ type ( mockClientBean *client.MockClientBean mockEventsCache *MockEventsCache mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor conflictResolver *conflictResolverImpl @@ -85,6 +86,7 @@ func (s *conflictResolverSuite) TearDownSuite() { func (s *conflictResolverSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.mockCtrl = gomock.NewController(s.T()) s.mockHistoryMgr = &mocks.HistoryManager{} s.mockHistoryV2Mgr = &mocks.HistoryV2Manager{} s.mockExecutionMgr = &mocks.ExecutionManager{} @@ -120,8 +122,8 @@ func (s *conflictResolverSuite) SetupTest() { s.mockClusterMetadata.On("GetCurrentClusterName").Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() h := &historyEngineImpl{ @@ -143,6 +145,7 @@ func (s *conflictResolverSuite) SetupTest() { } func (s *conflictResolverSuite) TearDownTest() { + s.mockCtrl.Finish() 
s.mockHistoryMgr.AssertExpectations(s.T()) s.mockHistoryV2Mgr.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) @@ -153,7 +156,6 @@ func (s *conflictResolverSuite) TearDownTest() { s.mockDomainCache.AssertExpectations(s.T()) s.mockEventsCache.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/handler.go b/service/history/handler.go index 5c22a6162fc..da2efd9453d 100644 --- a/service/history/handler.go +++ b/service/history/handler.go @@ -30,6 +30,7 @@ import ( "github.com/uber/cadence/.gen/go/health/metaserver" hist "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/history/historyserviceserver" + r "github.com/uber/cadence/.gen/go/replicator" gen "github.com/uber/cadence/.gen/go/shared" hc "github.com/uber/cadence/client/history" "github.com/uber/cadence/client/matching" @@ -43,6 +44,7 @@ import ( "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/quotas" "github.com/uber/cadence/common/service" + "github.com/uber/cadence/service/worker/replicator" "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" "go.uber.org/yarpc/yarpcerrors" ) @@ -50,25 +52,27 @@ import ( // Handler - Thrift handler interface for history service type ( Handler struct { - shardManager persistence.ShardManager - metadataMgr persistence.MetadataManager - visibilityMgr persistence.VisibilityManager - historyMgr persistence.HistoryManager - historyV2Mgr persistence.HistoryV2Manager - executionMgrFactory persistence.ExecutionManagerFactory - domainCache cache.DomainCache - historyServiceClient hc.Client - matchingServiceClient matching.Client - publicClient workflowserviceclient.Interface - hServiceResolver membership.ServiceResolver - controller *shardController - tokenSerializer common.TaskTokenSerializer - startWG sync.WaitGroup - metricsClient metrics.Client - config 
*Config - historyEventNotifier historyEventNotifier - publisher messaging.Producer - rateLimiter quotas.Limiter + shardManager persistence.ShardManager + metadataMgr persistence.MetadataManager + visibilityMgr persistence.VisibilityManager + historyMgr persistence.HistoryManager + historyV2Mgr persistence.HistoryV2Manager + executionMgrFactory persistence.ExecutionManagerFactory + domainCache cache.DomainCache + historyServiceClient hc.Client + matchingServiceClient matching.Client + publicClient workflowserviceclient.Interface + hServiceResolver membership.ServiceResolver + controller *shardController + tokenSerializer common.TaskTokenSerializer + startWG sync.WaitGroup + metricsClient metrics.Client + config *Config + historyEventNotifier historyEventNotifier + publisher messaging.Producer + rateLimiter quotas.Limiter + replicationTaskFetchers *ReplicationTaskFetchers + domainReplicator replicator.DomainReplicator service.Service } ) @@ -89,11 +93,20 @@ var ( ) // NewHandler creates a thrift handler for the history service -func NewHandler(sVice service.Service, config *Config, shardManager persistence.ShardManager, - metadataMgr persistence.MetadataManager, visibilityMgr persistence.VisibilityManager, - historyMgr persistence.HistoryManager, historyV2Mgr persistence.HistoryV2Manager, - executionMgrFactory persistence.ExecutionManagerFactory, domainCache cache.DomainCache, - publicClient workflowserviceclient.Interface) *Handler { +func NewHandler( + sVice service.Service, + config *Config, + shardManager persistence.ShardManager, + metadataMgr persistence.MetadataManager, + visibilityMgr persistence.VisibilityManager, + historyMgr persistence.HistoryManager, + historyV2Mgr persistence.HistoryV2Manager, + executionMgrFactory persistence.ExecutionManagerFactory, + domainCache cache.DomainCache, + publicClient workflowserviceclient.Interface, +) *Handler { + domainReplicator := replicator.NewDomainReplicator(metadataMgr, sVice.GetLogger()) + handler := &Handler{ 
Service: sVice, config: config, @@ -110,7 +123,8 @@ func NewHandler(sVice service.Service, config *Config, shardManager persistence. return float64(config.RPS()) }, ), - publicClient: publicClient, + publicClient: publicClient, + domainReplicator: domainReplicator, } // prevent us from trying to serve requests before shard controller is started and ready @@ -154,7 +168,6 @@ func (h *Handler) Start() error { } h.hServiceResolver = hServiceResolver - // TODO when global domain is enabled, uncomment the line below and remove the line after if h.GetClusterMetadata().IsGlobalDomainEnabled() { var err error h.publisher, err = h.GetMessagingClient().NewProducerWithClusterName(h.GetClusterMetadata().GetCurrentClusterName()) @@ -163,6 +176,14 @@ func (h *Handler) Start() error { } } + h.replicationTaskFetchers = NewReplicationTaskFetchers( + h.GetLogger(), + h.GetClusterMetadata().GetReplicationConsumerConfig(), + h.Service.GetClusterMetadata(), + h.Service.GetClientBean()) + + h.replicationTaskFetchers.Start() + h.controller = newShardController(h.Service, h.GetHostInfo(), hServiceResolver, h.shardManager, h.historyMgr, h.historyV2Mgr, h.domainCache, h.executionMgrFactory, h, h.config, h.GetLogger(), h.GetMetricsClient()) h.metricsClient = h.GetMetricsClient() @@ -170,12 +191,14 @@ func (h *Handler) Start() error { // events notifier must starts before controller h.historyEventNotifier.Start() h.controller.Start() + h.startWG.Done() return nil } // Stop stops the handler func (h *Handler) Stop() { + h.replicationTaskFetchers.Stop() h.domainCache.Stop() h.controller.Stop() h.shardManager.Close() @@ -193,7 +216,7 @@ func (h *Handler) Stop() { // CreateEngine is implementation for HistoryEngineFactory used for creating the engine instance for shard func (h *Handler) CreateEngine(context ShardContext) Engine { return NewEngineWithShardContext(context, h.visibilityMgr, h.matchingServiceClient, h.historyServiceClient, - h.publicClient, h.historyEventNotifier, h.publisher, 
h.config) + h.publicClient, h.historyEventNotifier, h.publisher, h.config, h.replicationTaskFetchers, h.domainReplicator) } // Health is for health check @@ -1238,6 +1261,49 @@ func (h *Handler) SyncActivity(ctx context.Context, syncActivityRequest *hist.Sy return nil } +// GetReplicationMessages is called by remote peers to get replicated messages for cross DC replication +func (h *Handler) GetReplicationMessages( + ctx context.Context, + request *r.GetReplicationMessagesRequest, +) (*r.GetReplicationMessagesResponse, error) { + + var wg sync.WaitGroup + wg.Add(len(request.Tokens)) + result := new(sync.Map) + + for _, token := range request.Tokens { + go func(token *r.ReplicationToken) { + defer wg.Done() + + engine, err := h.controller.getEngineForShard(int(token.GetShardID())) + if err != nil { + h.GetLogger().Warn("history engine not found for shard", tag.Error(err)) + return + } + + tasks, err := engine.GetReplicationMessages(ctx, token.GetLastRetrivedMessageId()) + if err != nil { + h.GetLogger().Warn("failed to get replication tasks for shard", tag.Error(err)) + return + } + + result.Store(token.GetShardID(), tasks) + }(token) + } + + wg.Wait() + + messagesByShard := make(map[int32]*r.ReplicationMessages) + result.Range(func(key, value interface{}) bool { + shardID := key.(int32) + tasks := value.(*r.ReplicationMessages) + messagesByShard[shardID] = tasks + return true + }) + + return &r.GetReplicationMessagesResponse{MessagesByShard: messagesByShard}, nil +} + // convertError is a helper method to convert ShardOwnershipLostError from persistence layer returned by various // HistoryEngine API calls to ShardOwnershipLost error return by HistoryService for client to be redirected to the // correct shard. 
diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index 7a38c68d25f..f2b0450f4cb 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -29,6 +29,7 @@ import ( "github.com/pborman/uuid" h "github.com/uber/cadence/.gen/go/history" + r "github.com/uber/cadence/.gen/go/replicator" workflow "github.com/uber/cadence/.gen/go/shared" hc "github.com/uber/cadence/client/history" "github.com/uber/cadence/client/matching" @@ -43,7 +44,9 @@ import ( "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/persistence" + "github.com/uber/cadence/common/service/config" warchiver "github.com/uber/cadence/service/worker/archiver" + "github.com/uber/cadence/service/worker/replicator" "go.uber.org/cadence/.gen/go/cadence/workflowserviceclient" ) @@ -56,29 +59,30 @@ const ( type ( historyEngineImpl struct { - currentClusterName string - shard ShardContext - timeSource clock.TimeSource - decisionHandler decisionHandler - clusterMetadata cluster.Metadata - historyMgr persistence.HistoryManager - historyV2Mgr persistence.HistoryV2Manager - executionManager persistence.ExecutionManager - visibilityMgr persistence.VisibilityManager - txProcessor transferQueueProcessor - timerProcessor timerQueueProcessor - taskAllocator taskAllocator - replicator *historyReplicator - replicatorProcessor queueProcessor - historyEventNotifier historyEventNotifier - tokenSerializer common.TaskTokenSerializer - historyCache *historyCache - metricsClient metrics.Client - logger log.Logger - throttledLogger log.Logger - config *Config - archivalClient warchiver.Client - resetor workflowResetor + currentClusterName string + shard ShardContext + timeSource clock.TimeSource + decisionHandler decisionHandler + clusterMetadata cluster.Metadata + historyMgr persistence.HistoryManager + historyV2Mgr persistence.HistoryV2Manager + executionManager persistence.ExecutionManager + visibilityMgr 
persistence.VisibilityManager + txProcessor transferQueueProcessor + timerProcessor timerQueueProcessor + taskAllocator taskAllocator + replicator *historyReplicator + replicatorProcessor ReplicatorQueueProcessor + historyEventNotifier historyEventNotifier + tokenSerializer common.TaskTokenSerializer + historyCache *historyCache + metricsClient metrics.Client + logger log.Logger + throttledLogger log.Logger + config *Config + archivalClient warchiver.Client + resetor workflowResetor + replicationTaskProcessors []*ReplicationTaskProcessor } ) @@ -134,6 +138,8 @@ func NewEngineWithShardContext( historyEventNotifier historyEventNotifier, publisher messaging.Producer, config *Config, + replicationTaskFetchers *ReplicationTaskFetchers, + domainReplicator replicator.DomainReplicator, ) Engine { currentClusterName := shard.GetService().GetClusterMetadata().GetCurrentClusterName() @@ -181,6 +187,14 @@ func NewEngineWithShardContext( } historyEngImpl.resetor = newWorkflowResetor(historyEngImpl) historyEngImpl.decisionHandler = newDecisionHandler(historyEngImpl) + + var replicationTaskProcessors []*ReplicationTaskProcessor + for _, replicationTaskFetcher := range replicationTaskFetchers.GetFetchers() { + replicationTaskProcessor := NewReplicationTaskProcessor(shard, historyEngImpl, domainReplicator, shard.GetMetricsClient(), replicationTaskFetcher) + replicationTaskProcessors = append(replicationTaskProcessors, replicationTaskProcessor) + } + historyEngImpl.replicationTaskProcessors = replicationTaskProcessors + shard.SetEngine(historyEngImpl) return historyEngImpl @@ -197,9 +211,15 @@ func (e *historyEngineImpl) Start() { e.txProcessor.Start() e.timerProcessor.Start() - if e.replicatorProcessor != nil { + + clusterMetadata := e.shard.GetClusterMetadata() + if e.replicatorProcessor != nil && clusterMetadata.GetReplicationConsumerConfig().Type != config.ReplicationConsumerTypeRPC { e.replicatorProcessor.Start() } + + for _, replicationTaskProcessor := range 
e.replicationTaskProcessors { + replicationTaskProcessor.Start() + } } // Stop the service. @@ -213,6 +233,10 @@ func (e *historyEngineImpl) Stop() { e.replicatorProcessor.Stop() } + for _, replicationTaskProcessor := range e.replicationTaskProcessors { + replicationTaskProcessor.Stop() + } + // unset the failover callback e.shard.GetDomainCache().UnregisterDomainChangeCallback(e.shard.GetShardID()) } @@ -2336,3 +2360,7 @@ func getWorkflowAlreadyStartedError(errMsg string, createRequestID string, workf RunId: common.StringPtr(fmt.Sprintf("%v", runID)), } } + +func (e *historyEngineImpl) GetReplicationMessages(ctx ctx.Context, taskID int64) (*r.ReplicationMessages, error) { + return e.replicatorProcessor.getTasks(taskID) +} diff --git a/service/history/historyEngine2_test.go b/service/history/historyEngine2_test.go index 7d4bbd6802f..b5440a027a1 100644 --- a/service/history/historyEngine2_test.go +++ b/service/history/historyEngine2_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -57,6 +58,7 @@ type ( // not merely log an error *require.Assertions historyEngine *historyEngineImpl + mockCtrl *gomock.Controller mockMatchingClient *mocks.MatchingClient mockArchivalClient *archiver.ClientMock mockHistoryClient *mocks.HistoryClient @@ -74,7 +76,7 @@ type ( mockDomainCache *cache.DomainCacheMock mockEventsCache *MockEventsCache mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor shardClosedCh chan int @@ -101,6 +103,7 @@ func (s *engine2Suite) SetupTest() { s.Assertions = require.New(s.T()) shardID := 0 + s.mockCtrl = gomock.NewController(s.T()) s.mockMatchingClient = &mocks.MatchingClient{} s.mockArchivalClient = &archiver.ClientMock{} s.mockHistoryClient = &mocks.HistoryClient{} @@ -149,8 +152,8 
@@ func (s *engine2Suite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -182,6 +185,7 @@ func (s *engine2Suite) SetupTest() { } func (s *engine2Suite) TearDownTest() { + s.mockCtrl.Finish() s.mockMatchingClient.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -192,7 +196,6 @@ func (s *engine2Suite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockArchivalClient.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/historyEngine3_eventsv2_test.go b/service/history/historyEngine3_eventsv2_test.go index ea089bd5e26..6a59fd8c84c 100644 --- a/service/history/historyEngine3_eventsv2_test.go +++ b/service/history/historyEngine3_eventsv2_test.go @@ -25,12 +25,12 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/uber-go/tally" - h "github.com/uber/cadence/.gen/go/history" workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/client" @@ -55,6 +55,7 @@ type ( // not merely log an error *require.Assertions historyEngine *historyEngineImpl + mockCtrl *gomock.Controller 
mockMatchingClient *mocks.MatchingClient mockHistoryClient *mocks.HistoryClient mockMetadataMgr *mocks.MetadataManager @@ -72,7 +73,7 @@ type ( mockArchivalClient *archiver.ClientMock mockEventsCache *MockEventsCache mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor shardClosedCh chan int @@ -99,6 +100,7 @@ func (s *engine3Suite) SetupTest() { s.Assertions = require.New(s.T()) shardID := 0 + s.mockCtrl = gomock.NewController(s.T()) s.mockMatchingClient = &mocks.MatchingClient{} s.mockHistoryClient = &mocks.HistoryClient{} s.mockMetadataMgr = &mocks.MetadataManager{} @@ -144,8 +146,8 @@ func (s *engine3Suite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -177,6 +179,7 @@ func (s *engine3Suite) SetupTest() { } func (s *engine3Suite) TearDownTest() { + s.mockCtrl.Finish() s.mockMatchingClient.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -187,7 +190,6 @@ func (s *engine3Suite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockArchivalClient.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git 
a/service/history/historyEngineInterfaces.go b/service/history/historyEngineInterfaces.go index e1f0fb54b5a..0fa620a4bd9 100644 --- a/service/history/historyEngineInterfaces.go +++ b/service/history/historyEngineInterfaces.go @@ -25,6 +25,7 @@ import ( "time" h "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/replicator" workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" "github.com/uber/cadence/common/definition" @@ -70,6 +71,7 @@ type ( ReplicateRawEvents(ctx context.Context, request *h.ReplicateRawEventsRequest) error SyncShardStatus(ctx context.Context, request *h.SyncShardStatusRequest) error SyncActivity(ctx context.Context, request *h.SyncActivityRequest) error + GetReplicationMessages(ctx context.Context, taskID int64) (*replicator.ReplicationMessages, error) NotifyNewHistoryEvent(event *historyEventNotification) NotifyNewTransferTasks(tasks []persistence.Task) @@ -87,6 +89,12 @@ type ( notifyNewTask() } + // ReplicatorQueueProcessor is the interface for replicator queue processor + ReplicatorQueueProcessor interface { + queueProcessor + getTasks(readLevel int64) (*replicator.ReplicationMessages, error) + } + queueAckMgr interface { getFinishedChan() <-chan struct{} readQueueTasks() ([]queueTaskInfo, bool, error) diff --git a/service/history/historyEngine_test.go b/service/history/historyEngine_test.go index 22bdbc1c21d..ff4dcdff7ec 100644 --- a/service/history/historyEngine_test.go +++ b/service/history/historyEngine_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -57,6 +58,7 @@ type ( // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, // not merely log an error *require.Assertions + mockCtrl *gomock.Controller mockHistoryEngine *historyEngineImpl mockMatchingClient *mocks.MatchingClient 
mockArchivalClient *archiver.ClientMock @@ -75,7 +77,7 @@ type ( mockService service.Service mockMetricClient metrics.Client mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor shardClosedCh chan int @@ -108,6 +110,7 @@ func (s *engineSuite) SetupTest() { s.Assertions = require.New(s.T()) shardID := 10 + s.mockCtrl = gomock.NewController(s.T()) s.mockMatchingClient = &mocks.MatchingClient{} s.mockArchivalClient = &archiver.ClientMock{} s.mockHistoryClient = &mocks.HistoryClient{} @@ -160,8 +163,8 @@ func (s *engineSuite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -198,6 +201,7 @@ func (s *engineSuite) SetupTest() { } func (s *engineSuite) TearDownTest() { + s.mockCtrl.Finish() s.mockHistoryEngine.historyEventNotifier.Stop() s.mockMatchingClient.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) @@ -209,7 +213,6 @@ func (s *engineSuite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockArchivalClient.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/historyReplicator_test.go b/service/history/historyReplicator_test.go index 4da3a13de26..3eec28d67c0 100644 
--- a/service/history/historyReplicator_test.go +++ b/service/history/historyReplicator_test.go @@ -27,11 +27,11 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/uber-go/tally" - h "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/shared" workflow "github.com/uber/cadence/.gen/go/shared" @@ -59,6 +59,7 @@ type ( historyReplicatorSuite struct { suite.Suite logger log.Logger + mockCtrl *gomock.Controller mockExecutionMgr *mocks.ExecutionManager mockHistoryMgr *mocks.HistoryManager mockHistoryV2Mgr *mocks.HistoryV2Manager @@ -72,7 +73,7 @@ type ( mockClientBean *client.MockClientBean mockWorkflowResetor *mockWorkflowResetor mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor historyReplicator *historyReplicator @@ -94,6 +95,7 @@ func (s *historyReplicatorSuite) TearDownSuite() { func (s *historyReplicatorSuite) SetupTest() { s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.mockCtrl = gomock.NewController(s.T()) s.mockHistoryMgr = &mocks.HistoryManager{} s.mockHistoryV2Mgr = &mocks.HistoryV2Manager{} s.mockExecutionMgr = &mocks.ExecutionManager{} @@ -130,8 +132,8 @@ func (s *historyReplicatorSuite) SetupTest() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(true) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ 
-161,6 +163,7 @@ func (s *historyReplicatorSuite) SetupTest() { func (s *historyReplicatorSuite) TearDownTest() { s.historyReplicator = nil + s.mockCtrl.Finish() s.mockHistoryMgr.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockShardManager.AssertExpectations(s.T()) @@ -171,7 +174,6 @@ func (s *historyReplicatorSuite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockWorkflowResetor.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/historyTestBase.go b/service/history/historyTestBase.go index 14dcb911162..7641dd7b0dd 100644 --- a/service/history/historyTestBase.go +++ b/service/history/historyTestBase.go @@ -365,6 +365,27 @@ func (s *TestShardContext) GetAllTimerFailoverLevels() map[string]persistence.Ti return ret } +// GetClusterReplicationLevel test implementation +func (s *TestShardContext) GetClusterReplicationLevel(cluster string) int64 { + s.RLock() + defer s.RUnlock() + + if replicationLevel, ok := s.shardInfo.ClusterReplicationLevel[cluster]; ok { + return replicationLevel + } + + return -1 +} + +// UpdateClusterReplicationLevel test implementation +func (s *TestShardContext) UpdateClusterReplicationLevel(cluster string, lastTaskID int64) error { + s.Lock() + defer s.Unlock() + + s.shardInfo.ClusterReplicationLevel[cluster] = lastTaskID + return nil +} + // GetDomainNotificationVersion test implementation func (s *TestShardContext) GetDomainNotificationVersion() int64 { s.RLock() diff --git a/service/history/replicationTaskFetcher.go b/service/history/replicationTaskFetcher.go new file mode 100644 index 00000000000..406be004d50 --- /dev/null +++ b/service/history/replicationTaskFetcher.go @@ -0,0 +1,215 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "github.com/uber/cadence/client" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/cluster" + "sync/atomic" + "time" + + "github.com/uber/cadence/.gen/go/cadence/workflowserviceclient" + r "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/common/backoff" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/service/config" +) + +const ( + fetchTaskRequestTimeout = 10 * time.Second + requestChanBufferSize = 1000 +) + +type ( + // ReplicationTaskFetcher is responsible for fetching replication messages from remote DC. 
+ ReplicationTaskFetcher struct { + status int32 + sourceCluster string + config *config.FetcherConfig + logger log.Logger + remotePeer workflowserviceclient.Interface + requestChan chan *request + done chan struct{} + } + + // ReplicationTaskFetchers is a group of fetchers, one per source DC. + ReplicationTaskFetchers struct { + status int32 + logger log.Logger + fetchers []*ReplicationTaskFetcher + } +) + +// NewReplicationTaskFetchers creates an instance of ReplicationTaskFetchers with given configs. +func NewReplicationTaskFetchers( + logger log.Logger, + consumerConfig *config.ReplicationConsumerConfig, + clusterMetadata cluster.Metadata, + clientBean client.Bean, +) *ReplicationTaskFetchers { + var fetchers []*ReplicationTaskFetcher + if consumerConfig.Type == config.ReplicationConsumerTypeRPC { + fetcherConfig := consumerConfig.FetcherConfig + for clusterName, info := range clusterMetadata.GetAllClusterInfo() { + if !info.Enabled { + continue + } + + if clusterName != clusterMetadata.GetCurrentClusterName() { + remoteFrontendClient := clientBean.GetRemoteFrontendClient(clusterName) + fetcher := newReplicationTaskFetcher(logger, clusterName, fetcherConfig, remoteFrontendClient) + fetchers = append(fetchers, fetcher) + } + } + + } + + return &ReplicationTaskFetchers{fetchers: fetchers, status: common.DaemonStatusInitialized, logger: logger} +} + +// Start starts the fetchers +func (f *ReplicationTaskFetchers) Start() { + if !atomic.CompareAndSwapInt32(&f.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { + return + } + + for _, fetcher := range f.fetchers { + fetcher.Start() + } + f.logger.Info("Replication task fetchers started.") +} + +// Stop stops the fetchers +func (f *ReplicationTaskFetchers) Stop() { + if !atomic.CompareAndSwapInt32(&f.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { + return + } + + for _, fetcher := range f.fetchers { + fetcher.Stop() + } + f.logger.Info("Replication task fetchers stopped.") +} + 
+// GetFetchers returns all the fetchers +func (f *ReplicationTaskFetchers) GetFetchers() []*ReplicationTaskFetcher { + return f.fetchers +} + +// newReplicationTaskFetcher creates a new fetcher. +func newReplicationTaskFetcher(logger log.Logger, sourceCluster string, config *config.FetcherConfig, sourceFrontend workflowserviceclient.Interface) *ReplicationTaskFetcher { + return &ReplicationTaskFetcher{ + status: common.DaemonStatusInitialized, + config: config, + logger: logger, + remotePeer: sourceFrontend, + sourceCluster: sourceCluster, + requestChan: make(chan *request, requestChanBufferSize), + done: make(chan struct{}), + } +} + +// Start starts the fetcher +func (f *ReplicationTaskFetcher) Start() { + if !atomic.CompareAndSwapInt32(&f.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { + return + } + + for i := 0; i < f.config.RPCParallelism; i++ { + go f.fetchTasks() + } + f.logger.Info("Replication task fetcher started.", tag.ClusterName(f.sourceCluster), tag.Counter(f.config.RPCParallelism)) +} + +// Stop stops the fetcher +func (f *ReplicationTaskFetcher) Stop() { + if !atomic.CompareAndSwapInt32(&f.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { + return + } + + close(f.done) + f.logger.Info("Replication task fetcher stopped.", tag.ClusterName(f.sourceCluster)) +} + +// fetchTasks collects getReplicationTasks requests from shards and sends out an aggregated request to the source frontend. +func (f *ReplicationTaskFetcher) fetchTasks() { + jitter := backoff.NewJitter() + timer := time.NewTimer(jitter.JitDuration(time.Duration(f.config.AggregationIntervalSecs)*time.Second, f.config.TimerJitterCoefficient)) + + requestByShard := make(map[int32]*request) + for { + select { + case request := <-f.requestChan: + // Here we only add the request to map. We will wait until timer fires to send the request to remote.
+ if req, ok := requestByShard[request.token.GetShardID()]; ok && req != request { + // The following should never happen under the assumption that only + // one processor is created per shard per source DC. + f.logger.Error("Get replication task request already exist for shard.") + close(req.respChan) + } + + requestByShard[request.token.GetShardID()] = request + case <-timer.C: + // When timer fires, we collect all the requests we have so far and attempt to send them to remote. + var tokens []*r.ReplicationToken + for _, request := range requestByShard { + tokens = append(tokens, request.token) + } + + ctx, cancel := context.WithTimeout(context.Background(), fetchTaskRequestTimeout) + request := &r.GetReplicationMessagesRequest{Tokens: tokens} + response, err := f.remotePeer.GetReplicationMessages(ctx, request) + cancel() + if err != nil { + f.logger.Error("Failed to get replication tasks", tag.Error(err)) + timer.Reset(jitter.JitDuration(time.Duration(f.config.ErrorRetryWaitSecs)*time.Second, f.config.TimerJitterCoefficient)) + continue + } + + f.logger.Debug("Successfully fetched replication tasks.", tag.Counter(len(response.MessagesByShard))) + + for shardID, tasks := range response.MessagesByShard { + request := requestByShard[shardID] + request.respChan <- tasks + close(request.respChan) + delete(requestByShard, shardID) + } + + timer.Reset(jitter.JitDuration(time.Duration(f.config.AggregationIntervalSecs)*time.Second, f.config.TimerJitterCoefficient)) + case <-f.done: + timer.Stop() + return + } + } +} + +// GetSourceCluster returns the source cluster for the fetcher +func (f *ReplicationTaskFetcher) GetSourceCluster() string { + return f.sourceCluster +} + +// GetRequestChan returns the request chan for the fetcher +func (f *ReplicationTaskFetcher) GetRequestChan() chan<- *request { + return f.requestChan +} diff --git a/service/history/replicationTaskProcessor.go b/service/history/replicationTaskProcessor.go new file mode 100644 index 
00000000000..cb77d9c7470 --- /dev/null +++ b/service/history/replicationTaskProcessor.go @@ -0,0 +1,317 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package history + +import ( + "context" + "sync/atomic" + "time" + + h "github.com/uber/cadence/.gen/go/history" + r "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/backoff" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/service/worker/replicator" + "go.uber.org/yarpc/yarpcerrors" +) + +const ( + dropSyncShardTaskTimeThreshold = 10 * time.Minute + replicationTimeout = 30 * time.Second + taskProcessorErrorRetryWait = time.Second + taskProcessorErrorRetryBackoffCoefficient = 1 + taskProcessorErrorRetryMaxAttampts = 5 +) + +var ( + // ErrUnknownReplicationTask is the error to indicate unknown replication task type + ErrUnknownReplicationTask = &shared.BadRequestError{Message: "unknown replication task"} +) + +type ( + // ReplicationTaskProcessor is responsible for processing replication tasks for a shard. + ReplicationTaskProcessor struct { + status int32 + shard ShardContext + lastProcessedMessageID int64 + lastRetrievedMessageID int64 + historyEngine Engine + sourceCluster string + domainReplicator replicator.DomainReplicator + metricsClient metrics.Client + logger log.Logger + retryPolicy backoff.RetryPolicy + + requestChan chan<- *request + done chan struct{} + } + + request struct { + token *r.ReplicationToken + respChan chan<- *r.ReplicationMessages + } +) + +// NewReplicationTaskProcessor creates a new replication task processor.
+func NewReplicationTaskProcessor( + shard ShardContext, + historyEngine Engine, + domainReplicator replicator.DomainReplicator, + metricsClient metrics.Client, + replicationTaskFetcher *ReplicationTaskFetcher, +) *ReplicationTaskProcessor { + retryPolicy := backoff.NewExponentialRetryPolicy(taskProcessorErrorRetryWait) + retryPolicy.SetBackoffCoefficient(taskProcessorErrorRetryBackoffCoefficient) + retryPolicy.SetMaximumAttempts(taskProcessorErrorRetryMaxAttampts) + + return &ReplicationTaskProcessor{ + status: common.DaemonStatusInitialized, + shard: shard, + historyEngine: historyEngine, + sourceCluster: replicationTaskFetcher.GetSourceCluster(), + domainReplicator: domainReplicator, + metricsClient: metricsClient, + logger: shard.GetLogger(), + retryPolicy: retryPolicy, + requestChan: replicationTaskFetcher.GetRequestChan(), + done: make(chan struct{}), + } +} + +// Start starts the processor +func (p *ReplicationTaskProcessor) Start() { + if !atomic.CompareAndSwapInt32(&p.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { + return + } + + go p.processorLoop() + p.logger.Info("ReplicationTaskProcessor started.") +} + +// Stop stops the processor +func (p *ReplicationTaskProcessor) Stop() { + if !atomic.CompareAndSwapInt32(&p.status, common.DaemonStatusStarted, common.DaemonStatusStopped) { + return + } + + close(p.done) +} + +func (p *ReplicationTaskProcessor) processorLoop() { + p.lastProcessedMessageID = p.shard.GetClusterReplicationLevel(p.sourceCluster) + scope := p.metricsClient.Scope(metrics.ReplicationTaskFetcherScope, metrics.TargetClusterTag(p.sourceCluster)) + + for { + respChan := make(chan *r.ReplicationMessages, 1) + // TODO: when we support prefetching, LastRetrivedMessageId can be different than LastProcessedMessageId + p.requestChan <- &request{ + token: &r.ReplicationToken{ + ShardID: common.Int32Ptr(int32(p.shard.GetShardID())), + LastRetrivedMessageId: common.Int64Ptr(p.lastRetrievedMessageID), + LastProcessedMessageId: 
common.Int64Ptr(p.lastProcessedMessageID), + }, + respChan: respChan, + } + + select { + case response := <-respChan: + p.logger.Debug("Got fetch replication messages response.", + tag.ReadLevel(response.GetLastRetrivedMessageId()), + tag.Bool(response.GetHasMore()), + tag.Counter(len(response.GetReplicationTasks())), + ) + + for _, replicationTask := range response.ReplicationTasks { + p.processTask(replicationTask) + } + + p.lastProcessedMessageID = response.GetLastRetrivedMessageId() + p.lastRetrievedMessageID = response.GetLastRetrivedMessageId() + err := p.shard.UpdateClusterReplicationLevel(p.sourceCluster, p.lastRetrievedMessageID) + if err != nil { + p.logger.Error("Error updating replication level for shard", tag.Error(err), tag.OperationFailed) + } + + scope.UpdateGauge(metrics.LastRetrievedMessageID, float64(p.lastRetrievedMessageID)) + scope.AddCounter(metrics.ReplicationTasksApplied, int64(len(response.GetReplicationTasks()))) + case <-p.done: + p.logger.Info("Closing replication task processor.", tag.ReadLevel(p.lastRetrievedMessageID)) + return + } + } +} + +func (p *ReplicationTaskProcessor) processTask(replicationTask *r.ReplicationTask) { + err := backoff.Retry(func() error { + return p.processTaskOnce(replicationTask) + }, p.retryPolicy, isTransientRetryableError) + + if err != nil { + // TODO: insert into our own dlq in cadence persistence? 
+ // p.nackMsg(msg, err, logger) + p.logger.Error("Failed to apply replication task after retry.", tag.TaskID(replicationTask.GetSourceTaskId())) + } +} + +func (p *ReplicationTaskProcessor) processTaskOnce(replicationTask *r.ReplicationTask) error { + var err error + var scope int + switch replicationTask.GetTaskType() { + case r.ReplicationTaskTypeDomain: + scope = metrics.DomainReplicationTaskScope + err = p.handleDomainReplicationTask(replicationTask) + case r.ReplicationTaskTypeSyncShardStatus: + scope = metrics.SyncShardTaskScope + err = p.handleSyncShardTask(replicationTask) + case r.ReplicationTaskTypeSyncActivity: + scope = metrics.SyncActivityTaskScope + err = p.handleActivityTask(replicationTask) + case r.ReplicationTaskTypeHistory: + scope = metrics.HistoryReplicationTaskScope + err = p.handleHistoryReplicationTask(replicationTask) + case r.ReplicationTaskTypeHistoryMetadata: + // Without kafka we should not have size limits so we don't necessarily need this in the new replication scheme.
+ default: + p.logger.Error("Unknown task type.") + scope = metrics.ReplicatorScope + err = ErrUnknownReplicationTask + } + + if err != nil { + p.updateFailureMetric(scope, err) + } else { + p.logger.Debug("Successfully applied replication task.", tag.TaskID(replicationTask.GetSourceTaskId())) + p.metricsClient.Scope(metrics.ReplicationTaskFetcherScope, metrics.TargetClusterTag(p.sourceCluster)).IncCounter(metrics.ReplicationTasksApplied) + } + + return err +} + +func isTransientRetryableError(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return false + default: + return true + } +} + +func (p *ReplicationTaskProcessor) updateFailureMetric(scope int, err error) { + // Always update failure counter for all replicator errors + p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures) + + // Also update counter to distinguish between type of failures + switch err := err.(type) { + case *h.ShardOwnershipLostError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrShardOwnershipLostCounter) + case *shared.BadRequestError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrBadRequestCounter) + case *shared.DomainNotActiveError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrDomainNotActiveCounter) + case *shared.WorkflowExecutionAlreadyStartedError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrExecutionAlreadyStartedCounter) + case *shared.EntityNotExistsError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrEntityNotExistsCounter) + case *shared.LimitExceededError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrLimitExceededCounter) + case *shared.RetryTaskError: + p.metricsClient.IncCounter(scope, metrics.CadenceErrRetryTaskCounter) + case *yarpcerrors.Status: + if err.Code() == yarpcerrors.CodeDeadlineExceeded { + p.metricsClient.IncCounter(scope, metrics.CadenceErrContextTimeoutCounter) + } + } +} + +func (p *ReplicationTaskProcessor) handleActivityTask(task *r.ReplicationTask) error { + attr := 
task.SyncActicvityTaskAttributes + request := &h.SyncActivityRequest{ + DomainId: attr.DomainId, + WorkflowId: attr.WorkflowId, + RunId: attr.RunId, + Version: attr.Version, + ScheduledId: attr.ScheduledId, + ScheduledTime: attr.ScheduledTime, + StartedId: attr.StartedId, + StartedTime: attr.StartedTime, + LastHeartbeatTime: attr.LastHeartbeatTime, + Details: attr.Details, + Attempt: attr.Attempt, + LastFailureReason: attr.LastFailureReason, + LastWorkerIdentity: attr.LastWorkerIdentity, + } + ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + defer cancel() + return p.historyEngine.SyncActivity(ctx, request) +} + +func (p *ReplicationTaskProcessor) handleHistoryReplicationTask(task *r.ReplicationTask) error { + attr := task.HistoryTaskAttributes + request := &h.ReplicateEventsRequest{ + SourceCluster: common.StringPtr(p.sourceCluster), + DomainUUID: attr.DomainId, + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: attr.WorkflowId, + RunId: attr.RunId, + }, + FirstEventId: attr.FirstEventId, + NextEventId: attr.NextEventId, + Version: attr.Version, + ReplicationInfo: attr.ReplicationInfo, + History: attr.History, + NewRunHistory: attr.NewRunHistory, + ForceBufferEvents: common.BoolPtr(false), + EventStoreVersion: attr.EventStoreVersion, + NewRunEventStoreVersion: attr.NewRunEventStoreVersion, + ResetWorkflow: attr.ResetWorkflow, + } + ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + defer cancel() + return p.historyEngine.ReplicateEvents(ctx, request) +} + +func (p *ReplicationTaskProcessor) handleSyncShardTask(task *r.ReplicationTask) error { + attr := task.SyncShardStatusTaskAttributes + if time.Now().Sub(time.Unix(0, attr.GetTimestamp())) > dropSyncShardTaskTimeThreshold { + return nil + } + + req := &h.SyncShardStatusRequest{ + SourceCluster: attr.SourceCluster, + ShardId: attr.ShardId, + Timestamp: attr.Timestamp, + } + ctx, cancel := context.WithTimeout(context.Background(), 
replicationTimeout) + defer cancel() + return p.historyEngine.SyncShardStatus(ctx, req) +} + +func (p *ReplicationTaskProcessor) handleDomainReplicationTask(task *r.ReplicationTask) error { + p.metricsClient.IncCounter(metrics.DomainReplicationTaskScope, metrics.ReplicatorMessages) + sw := p.metricsClient.StartTimer(metrics.DomainReplicationTaskScope, metrics.ReplicatorLatency) + defer sw.Stop() + + return p.domainReplicator.HandleReceivingTask(task.DomainTaskAttributes) +} diff --git a/service/history/replicatorQueueProcessor.go b/service/history/replicatorQueueProcessor.go index c7714cedf1e..fc5ae425d83 100644 --- a/service/history/replicatorQueueProcessor.go +++ b/service/history/replicatorQueueProcessor.go @@ -56,15 +56,22 @@ type ( ) var ( - errUnknownReplicationTask = errors.New("Unknown replication task") - errHistoryNotFoundTask = errors.New("History not found") + errUnknownReplicationTask = errors.New("unknown replication task") + errHistoryNotFoundTask = errors.New("history not found") defaultHistoryPageSize = 1000 ) -func newReplicatorQueueProcessor(shard ShardContext, historyCache *historyCache, replicator messaging.Producer, - executionMgr persistence.ExecutionManager, historyMgr persistence.HistoryManager, historyV2Mgr persistence.HistoryV2Manager, logger log.Logger) queueProcessor { +func newReplicatorQueueProcessor( + shard ShardContext, + historyCache *historyCache, + replicator messaging.Producer, + executionMgr persistence.ExecutionManager, + historyMgr persistence.HistoryManager, + historyV2Mgr persistence.HistoryV2Manager, + logger log.Logger, +) ReplicatorQueueProcessor { - currentClusterNamer := shard.GetService().GetClusterMetadata().GetCurrentClusterName() + currentClusterName := shard.GetService().GetClusterMetadata().GetCurrentClusterName() config := shard.GetConfig() options := &QueueProcessorOptions{ @@ -86,7 +93,7 @@ func newReplicatorQueueProcessor(shard ShardContext, historyCache *historyCache, } processor := 
&replicatorQueueProcessorImpl{ - currentClusterNamer: currentClusterNamer, + currentClusterNamer: currentClusterName, shard: shard, historyCache: historyCache, replicationTaskFilter: replicationTaskFilter, @@ -100,7 +107,7 @@ func newReplicatorQueueProcessor(shard ShardContext, historyCache *historyCache, } queueAckMgr := newQueueAckMgr(shard, options, processor, shard.GetReplicatorAckLevel(), logger) - queueProcessorBase := newQueueProcessorBase(currentClusterNamer, shard, options, processor, queueAckMgr, logger) + queueProcessorBase := newQueueProcessorBase(currentClusterName, shard, options, processor, queueAckMgr, logger) processor.queueAckMgr = queueAckMgr processor.queueProcessorBase = queueProcessorBase @@ -140,12 +147,49 @@ func (p *replicatorQueueProcessorImpl) process(qTask queueTaskInfo, shouldProces } } +func (p *replicatorQueueProcessorImpl) toReplicationTask(qTask queueTaskInfo) (*replicator.ReplicationTask, error) { + task, ok := qTask.(*persistence.ReplicationTaskInfo) + if !ok { + return nil, errUnexpectedQueueTask + } + + switch task.TaskType { + case persistence.ReplicationTaskTypeSyncActivity: + task, err := p.getSyncActivityTask(task) + if task != nil { + task.SourceTaskId = common.Int64Ptr(qTask.GetTaskID()) + } + return task, err + case persistence.ReplicationTaskTypeHistory: + task, err := p.getHistoryReplicationTask(task) + if task != nil { + task.SourceTaskId = common.Int64Ptr(qTask.GetTaskID()) + } + return task, err + default: + return nil, errUnknownReplicationTask + } +} + func (p *replicatorQueueProcessorImpl) queueShutdown() error { // there is no shutdown specific behavior for replication queue return nil } -func (p *replicatorQueueProcessorImpl) processSyncActivityTask(task *persistence.ReplicationTaskInfo) (retError error) { +func (p *replicatorQueueProcessorImpl) processSyncActivityTask(task *persistence.ReplicationTaskInfo) error { + replicationTask, err := p.getSyncActivityTask(task) + if err != nil { + return err + } + + if 
replicationTask == nil { + return nil + } + + return p.replicator.Publish(replicationTask) +} + +func (p *replicatorQueueProcessorImpl) getSyncActivityTask(task *persistence.ReplicationTaskInfo) (replicationTask *replicator.ReplicationTask, retError error) { domainID := task.DomainID execution := shared.WorkflowExecution{ WorkflowId: common.StringPtr(task.WorkflowID), @@ -153,25 +197,25 @@ func (p *replicatorQueueProcessorImpl) processSyncActivityTask(task *persistence } context, release, err := p.historyCache.getOrCreateWorkflowExecutionForBackground(domainID, execution) if err != nil { - return err + return nil, err } defer func() { release(retError) }() msBuilder, err := context.loadWorkflowExecution() if err != nil { if _, ok := err.(*shared.EntityNotExistsError); ok { - return nil + return nil, nil } - return err + return nil, err } if !msBuilder.IsWorkflowExecutionRunning() { // workflow already finished, no need to process the timer - return nil + return nil, nil } activityInfo, ok := msBuilder.GetActivityInfo(task.ScheduledID) if !ok { - return nil + return nil, nil } var startedTime *int64 @@ -183,7 +227,7 @@ func (p *replicatorQueueProcessorImpl) processSyncActivityTask(task *persistence // LastHeartBeatUpdatedTime must be valid when getting the sync activity replication task heartbeatTime = common.Int64Ptr(activityInfo.LastHeartBeatUpdatedTime.UnixNano()) - replicationTask := &replicator.ReplicationTask{ + replicationTask = &replicator.ReplicationTask{ TaskType: replicator.ReplicationTaskType.Ptr(replicator.ReplicationTaskTypeSyncActivity), SyncActicvityTaskAttributes: &replicator.SyncActicvityTaskAttributes{ DomainId: common.StringPtr(task.DomainID), @@ -202,7 +246,7 @@ func (p *replicatorQueueProcessorImpl) processSyncActivityTask(task *persistence }, } - return p.replicator.Publish(replicationTask) + return replicationTask, retError } func (p *replicatorQueueProcessorImpl) processHistoryReplicationTask(task *persistence.ReplicationTaskInfo) error { @@ 
-230,6 +274,22 @@ func (p *replicatorQueueProcessorImpl) processHistoryReplicationTask(task *persi return err } +func (p *replicatorQueueProcessorImpl) getHistoryReplicationTask( + task *persistence.ReplicationTaskInfo, +) (*replicator.ReplicationTask, error) { + domainEntry, err := p.shard.GetDomainCache().GetDomainByID(task.DomainID) + if err != nil { + return nil, err + } + + var targetClusters []string + for _, cluster := range domainEntry.GetReplicationConfig().Clusters { + targetClusters = append(targetClusters, cluster.ClusterName) + } + + return GenerateReplicationTask(targetClusters, task, p.historyMgr, p.historyV2Mgr, p.metricsClient, p.logger, nil, common.IntPtr(p.shard.GetShardID())) +} + func (p *replicatorQueueProcessorImpl) generateHistoryMetadataTask(targetClusters []string, task *persistence.ReplicationTaskInfo) *replicator.ReplicationTask { return &replicator.ReplicationTask{ TaskType: replicator.ReplicationTaskTypeHistoryMetadata.Ptr(), @@ -490,3 +550,29 @@ func convertLastReplicationInfo(info map[string]*persistence.ReplicationInfo) ma return replicationInfoMap } + +func (p *replicatorQueueProcessorImpl) getTasks(readLevel int64) (*replicator.ReplicationMessages, error) { + taskInfoList, hasMore, err := p.readTasks(readLevel) + if err != nil { + return nil, err + } + + var replicationTasks []*replicator.ReplicationTask + for _, taskInfo := range taskInfoList { + readLevel = taskInfo.GetTaskID() + replicationTask, err := p.toReplicationTask(taskInfo) + if err != nil { + return nil, err + } + + if replicationTask != nil { + replicationTasks = append(replicationTasks, replicationTask) + } + } + + return &replicator.ReplicationMessages{ + ReplicationTasks: replicationTasks, + HasMore: common.BoolPtr(hasMore), + LastRetrivedMessageId: common.Int64Ptr(readLevel), + }, nil +} diff --git a/service/history/replicatorQueueProcessor_mock.go b/service/history/replicatorQueueProcessor_mock.go new file mode 100644 index 00000000000..c6ac64d934a --- /dev/null 
+++ b/service/history/replicatorQueueProcessor_mock.go @@ -0,0 +1,105 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/uber/cadence/service/history (interfaces: ReplicatorQueueProcessor) + +// Package history is a generated GoMock package. 
+package history + +import ( + gomock "github.com/golang/mock/gomock" + replicator "github.com/uber/cadence/.gen/go/replicator" + reflect "reflect" +) + +// MockReplicatorQueueProcessor is a mock of ReplicatorQueueProcessor interface +type MockReplicatorQueueProcessor struct { + ctrl *gomock.Controller + recorder *MockReplicatorQueueProcessorMockRecorder +} + +// MockReplicatorQueueProcessorMockRecorder is the mock recorder for MockReplicatorQueueProcessor +type MockReplicatorQueueProcessorMockRecorder struct { + mock *MockReplicatorQueueProcessor +} + +// NewMockReplicatorQueueProcessor creates a new mock instance +func NewMockReplicatorQueueProcessor(ctrl *gomock.Controller) *MockReplicatorQueueProcessor { + mock := &MockReplicatorQueueProcessor{ctrl: ctrl} + mock.recorder = &MockReplicatorQueueProcessorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockReplicatorQueueProcessor) EXPECT() *MockReplicatorQueueProcessorMockRecorder { + return m.recorder +} + +// Start mocks base method +func (m *MockReplicatorQueueProcessor) Start() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Start") +} + +// Start indicates an expected call of Start +func (mr *MockReplicatorQueueProcessorMockRecorder) Start() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockReplicatorQueueProcessor)(nil).Start)) +} + +// Stop mocks base method +func (m *MockReplicatorQueueProcessor) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop +func (mr *MockReplicatorQueueProcessorMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockReplicatorQueueProcessor)(nil).Stop)) +} + +// getTasks mocks base method +func (m *MockReplicatorQueueProcessor) getTasks(arg0 int64) (*replicator.ReplicationMessages, error) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getTasks", arg0) + ret0, _ := ret[0].(*replicator.ReplicationMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getTasks indicates an expected call of getTasks +func (mr *MockReplicatorQueueProcessorMockRecorder) getTasks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTasks", reflect.TypeOf((*MockReplicatorQueueProcessor)(nil).getTasks), arg0) +} + +// notifyNewTask mocks base method +func (m *MockReplicatorQueueProcessor) notifyNewTask() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "notifyNewTask") +} + +// notifyNewTask indicates an expected call of notifyNewTask +func (mr *MockReplicatorQueueProcessorMockRecorder) notifyNewTask() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "notifyNewTask", reflect.TypeOf((*MockReplicatorQueueProcessor)(nil).notifyNewTask)) +} diff --git a/service/history/shardContext.go b/service/history/shardContext.go index eb25027a690..822e8a4cfd6 100644 --- a/service/history/shardContext.go +++ b/service/history/shardContext.go @@ -77,6 +77,9 @@ type ( GetReplicatorAckLevel() int64 UpdateReplicatorAckLevel(ackLevel int64) error + GetClusterReplicationLevel(cluster string) int64 + UpdateClusterReplicationLevel(cluster string, lastTaskID int64) error + GetTimerAckLevel() time.Time UpdateTimerAckLevel(ackLevel time.Time) error GetTimerClusterAckLevel(cluster string) time.Time @@ -260,6 +263,28 @@ func (s *shardContextImpl) UpdateReplicatorAckLevel(ackLevel int64) error { return s.updateShardInfoLocked() } +func (s *shardContextImpl) GetClusterReplicationLevel(cluster string) int64 { + s.RLock() + defer s.RUnlock() + + // if we can find corresponding replication level + if replicationLevel, ok := s.shardInfo.ClusterReplicationLevel[cluster]; ok { + return replicationLevel + } + + // New cluster always starts from -1 + return -1 +} + +func (s *shardContextImpl) 
UpdateClusterReplicationLevel(cluster string, lastTaskID int64) error { + s.Lock() + defer s.Unlock() + + s.shardInfo.ClusterReplicationLevel[cluster] = lastTaskID + s.shardInfo.StolenSinceRenew = 0 + return s.updateShardInfoLocked() +} + func (s *shardContextImpl) GetTimerAckLevel() time.Time { s.RLock() defer s.RUnlock() @@ -1265,6 +1290,10 @@ func copyShardInfo(shardInfo *persistence.ShardInfo) *persistence.ShardInfo { for k, v := range shardInfo.ClusterTimerAckLevel { clusterTimerAckLevel[k] = v } + clusterReplicationLevel := make(map[string]int64) + for k, v := range shardInfo.ClusterReplicationLevel { + clusterReplicationLevel[k] = v + } shardInfoCopy := &persistence.ShardInfo{ ShardID: shardInfo.ShardID, Owner: shardInfo.Owner, @@ -1278,6 +1307,7 @@ func copyShardInfo(shardInfo *persistence.ShardInfo) *persistence.ShardInfo { ClusterTransferAckLevel: clusterTransferAckLevel, ClusterTimerAckLevel: clusterTimerAckLevel, DomainNotificationVersion: shardInfo.DomainNotificationVersion, + ClusterReplicationLevel: clusterReplicationLevel, } return shardInfoCopy diff --git a/service/history/shardController_test.go b/service/history/shardController_test.go index e2fb181e2d8..fb3c6f5ccd5 100644 --- a/service/history/shardController_test.go +++ b/service/history/shardController_test.go @@ -142,6 +142,7 @@ func (s *shardControllerSuite) TestAcquireShardSuccess() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, + ClusterReplicationLevel: map[string]int64{}, }, }, nil).Once() s.mockShardManager.On("UpdateShard", &persistence.UpdateShardRequest{ @@ -161,8 +162,9 @@ func (s *shardControllerSuite) TestAcquireShardSuccess() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, - TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, - TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + 
TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, + TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + ClusterReplicationLevel: map[string]int64{}, }, PreviousRangeID: 5, }).Return(nil).Once() @@ -232,6 +234,7 @@ func (s *shardControllerSuite) TestAcquireShardRenewSuccess() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, + ClusterReplicationLevel: map[string]int64{}, }, }, nil).Once() s.mockShardManager.On("UpdateShard", &persistence.UpdateShardRequest{ @@ -251,8 +254,9 @@ func (s *shardControllerSuite) TestAcquireShardRenewSuccess() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, - TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, - TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, + TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + ClusterReplicationLevel: map[string]int64{}, }, PreviousRangeID: 5, }).Return(nil).Once() @@ -307,6 +311,7 @@ func (s *shardControllerSuite) TestAcquireShardRenewLookupFailed() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, + ClusterReplicationLevel: map[string]int64{}, }, }, nil).Once() s.mockShardManager.On("UpdateShard", &persistence.UpdateShardRequest{ @@ -326,8 +331,9 @@ func (s *shardControllerSuite) TestAcquireShardRenewLookupFailed() { cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, - TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, - TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, + TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, 
+ ClusterReplicationLevel: map[string]int64{}, }, PreviousRangeID: 5, }).Return(nil).Once() @@ -603,6 +609,7 @@ func (s *shardControllerSuite) setupMocksForAcquireShard(shardID int, mockEngine cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, + ClusterReplicationLevel: map[string]int64{}, }, }, nil).Once() s.mockShardManager.On("UpdateShard", &persistence.UpdateShardRequest{ @@ -622,8 +629,9 @@ func (s *shardControllerSuite) setupMocksForAcquireShard(shardID int, mockEngine cluster.TestCurrentClusterName: currentClusterTimerAck, cluster.TestAlternativeClusterName: alternativeClusterTimerAck, }, - TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, - TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + TransferFailoverLevels: map[string]persistence.TransferFailoverLevel{}, + TimerFailoverLevels: map[string]persistence.TimerFailoverLevel{}, + ClusterReplicationLevel: map[string]int64{}, }, PreviousRangeID: currentRangeID, }).Return(nil).Once() diff --git a/service/history/timerQueueProcessor2_test.go b/service/history/timerQueueProcessor2_test.go index 372fdb05601..be795856e84 100644 --- a/service/history/timerQueueProcessor2_test.go +++ b/service/history/timerQueueProcessor2_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -55,6 +56,7 @@ type ( config *Config logger log.Logger + mockCtrl *gomock.Controller mockHistoryEngine *historyEngineImpl mockMatchingClient *mocks.MatchingClient mockMetadataMgr *mocks.MetadataManager @@ -70,7 +72,7 @@ type ( mockService service.Service mockEventsCache *MockEventsCache mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor domainID string @@ -94,6 +96,7 
@@ func (s *timerQueueProcessor2Suite) SetupSuite() { func (s *timerQueueProcessor2Suite) SetupTest() { shardID := 0 + s.mockCtrl = gomock.NewController(s.T()) s.mockMatchingClient = &mocks.MatchingClient{} s.mockExecutionMgr = &mocks.ExecutionManager{} s.mockShardManager = &mocks.ShardManager{} @@ -160,8 +163,8 @@ func (s *timerQueueProcessor2Suite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() s.mockClusterMetadata.On("IsArchivalEnabled").Return(false) @@ -192,6 +195,7 @@ func (s *timerQueueProcessor2Suite) SetupTest() { } func (s *timerQueueProcessor2Suite) TearDownTest() { + s.mockCtrl.Finish() s.mockShardManager.AssertExpectations(s.T()) s.mockMatchingClient.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) @@ -202,7 +206,6 @@ func (s *timerQueueProcessor2Suite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockEventsCache.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/timerQueueProcessor_test.go b/service/history/timerQueueProcessor_test.go index c92714520e3..c8ce13eef60 100644 --- a/service/history/timerQueueProcessor_test.go +++ b/service/history/timerQueueProcessor_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" 
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -51,13 +52,14 @@ type ( shardClosedCh chan int logger log.Logger + mockCtrl *gomock.Controller mockMetadataMgr *mocks.MetadataManager mockVisibilityMgr *mocks.VisibilityManager mockMatchingClient *mocks.MatchingClient mockClusterMetadata *mocks.ClusterMetadata mockEventsCache *MockEventsCache mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor } ) @@ -81,6 +83,8 @@ func (s *timerQueueProcessorSuite) SetupTest() { s.ShardContext.config.TransferProcessorUpdateAckInterval = dynamicconfig.GetDurationPropertyFn(100 * time.Millisecond) s.ShardContext.config.TimerProcessorUpdateAckInterval = dynamicconfig.GetDurationPropertyFn(100 * time.Millisecond) + s.mockCtrl = gomock.NewController(s.T()) + s.mockMatchingClient = &mocks.MatchingClient{} s.mockClusterMetadata = &mocks.ClusterMetadata{} @@ -89,8 +93,8 @@ func (s *timerQueueProcessorSuite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() diff --git a/service/history/timerQueueStandbyProcessor_test.go b/service/history/timerQueueStandbyProcessor_test.go index 1ad3775c1ef..6a3a5da823d 100644 --- a/service/history/timerQueueStandbyProcessor_test.go +++ b/service/history/timerQueueStandbyProcessor_test.go @@ -24,6 +24,7 @@ import ( 
"testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -49,6 +50,7 @@ type ( timerQueueStandbyProcessorSuite struct { suite.Suite + mockCtrl *gomock.Controller mockShardManager *mocks.ShardManager mockHistoryEngine *historyEngineImpl mockMetadataMgr *mocks.MetadataManager @@ -63,7 +65,7 @@ type ( mockHistoryRereplicator *xdc.MockHistoryRereplicator logger log.Logger mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor domainID string @@ -85,6 +87,7 @@ func (s *timerQueueStandbyProcessorSuite) SetupSuite() { func (s *timerQueueStandbyProcessorSuite) SetupTest() { shardID := 0 s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.mockCtrl = gomock.NewController(s.T()) s.mockShardManager = &mocks.ShardManager{} s.mockExecutionMgr = &mocks.ExecutionManager{} s.mockHistoryMgr = &mocks.HistoryManager{} @@ -139,8 +142,8 @@ func (s *timerQueueStandbyProcessorSuite) SetupTest() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(true) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -170,6 +173,7 @@ func (s *timerQueueStandbyProcessorSuite) SetupTest() { } func (s *timerQueueStandbyProcessorSuite) TearDownTest() { + s.mockCtrl.Finish() s.mockShardManager.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -177,7 
+181,6 @@ func (s *timerQueueStandbyProcessorSuite) TearDownTest() { s.mockHistoryRereplicator.AssertExpectations(s.T()) s.mockClientBean.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/transferQueueActiveProcessor_test.go b/service/history/transferQueueActiveProcessor_test.go index 64014799795..0825aa182e2 100644 --- a/service/history/transferQueueActiveProcessor_test.go +++ b/service/history/transferQueueActiveProcessor_test.go @@ -24,11 +24,11 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/uber-go/tally" - "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/matching" workflow "github.com/uber/cadence/.gen/go/shared" @@ -52,6 +52,7 @@ type ( transferQueueActiveProcessorSuite struct { suite.Suite + mockCtrl *gomock.Controller mockShardManager *mocks.ShardManager mockHistoryEngine *historyEngineImpl mockMetadataMgr *mocks.MetadataManager @@ -70,7 +71,7 @@ type ( mockService service.Service logger log.Logger mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor domainID string @@ -96,6 +97,7 @@ func (s *transferQueueActiveProcessorSuite) TearDownSuite() { func (s *transferQueueActiveProcessorSuite) SetupTest() { shardID := 0 s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.mockCtrl = gomock.NewController(s.T()) s.mockShardManager = &mocks.ShardManager{} s.mockExecutionMgr = &mocks.ExecutionManager{} s.mockHistoryMgr = &mocks.HistoryManager{} @@ -149,8 +151,8 @@ func (s *transferQueueActiveProcessorSuite) SetupTest() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(true) s.mockTxProcessor = 
&MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -183,6 +185,7 @@ func (s *transferQueueActiveProcessorSuite) SetupTest() { } func (s *transferQueueActiveProcessorSuite) TearDownTest() { + s.mockCtrl.Finish() s.mockShardManager.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -194,7 +197,6 @@ func (s *transferQueueActiveProcessorSuite) TearDownTest() { s.mockQueueAckMgr.AssertExpectations(s.T()) s.mockClientBean.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/transferQueueStandbyProcessor_test.go b/service/history/transferQueueStandbyProcessor_test.go index d1578eed00b..a70e1930b86 100644 --- a/service/history/transferQueueStandbyProcessor_test.go +++ b/service/history/transferQueueStandbyProcessor_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/pborman/uuid" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -49,6 +50,7 @@ type ( transferQueueStandbyProcessorSuite struct { suite.Suite + mockCtrl *gomock.Controller mockShardManager *mocks.ShardManager mockHistoryEngine *historyEngineImpl mockMetadataMgr *mocks.MetadataManager @@ -66,7 +68,7 @@ type ( mockHistoryRereplicator *xdc.MockHistoryRereplicator logger log.Logger mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor 
*MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor domainID string @@ -88,6 +90,7 @@ func (s *transferQueueStandbyProcessorSuite) SetupSuite() { func (s *transferQueueStandbyProcessorSuite) SetupTest() { shardID := 0 s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.mockCtrl = gomock.NewController(s.T()) s.mockShardManager = &mocks.ShardManager{} s.mockExecutionMgr = &mocks.ExecutionManager{} s.mockHistoryMgr = &mocks.HistoryManager{} @@ -145,8 +148,8 @@ func (s *transferQueueStandbyProcessorSuite) SetupTest() { s.mockClusterMetadata.On("IsGlobalDomainEnabled").Return(true) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -181,6 +184,7 @@ func (s *transferQueueStandbyProcessorSuite) SetupTest() { } func (s *transferQueueStandbyProcessorSuite) TearDownTest() { + s.mockCtrl.Finish() s.mockShardManager.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -189,7 +193,6 @@ func (s *transferQueueStandbyProcessorSuite) TearDownTest() { s.mockClientBean.AssertExpectations(s.T()) s.mockHistoryRereplicator.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/service/history/workflowResetor_test.go b/service/history/workflowResetor_test.go index 9ad31ea173d..362833b7992 100644 --- a/service/history/workflowResetor_test.go +++ b/service/history/workflowResetor_test.go @@ -27,6 +27,7 @@ import ( 
"testing" "time" + "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -58,6 +59,7 @@ type ( // not merely log an error *require.Assertions historyEngine *historyEngineImpl + mockCtrl *gomock.Controller mockMatchingClient *mocks.MatchingClient mockHistoryClient *mocks.HistoryClient mockMetadataMgr *mocks.MetadataManager @@ -76,7 +78,7 @@ type ( mockEventsCache *MockEventsCache resetor workflowResetor mockTxProcessor *MockTransferQueueProcessor - mockReplicationProcessor *mockQueueProcessor + mockReplicationProcessor *MockReplicatorQueueProcessor mockTimerProcessor *MockTimerQueueProcessor shardClosedCh chan int @@ -106,6 +108,7 @@ func (s *resetorSuite) SetupTest() { shardID := 10 s.shardID = shardID + s.mockCtrl = gomock.NewController(s.T()) s.mockMatchingClient = &mocks.MatchingClient{} s.mockHistoryClient = &mocks.HistoryClient{} s.mockMetadataMgr = &mocks.MetadataManager{} @@ -150,8 +153,8 @@ func (s *resetorSuite) SetupTest() { s.mockClusterMetadata.On("ClusterNameForFailoverVersion", common.EmptyVersion).Return(cluster.TestCurrentClusterName) s.mockTxProcessor = &MockTransferQueueProcessor{} s.mockTxProcessor.On("NotifyNewTask", mock.Anything, mock.Anything).Maybe() - s.mockReplicationProcessor = &mockQueueProcessor{} - s.mockReplicationProcessor.On("notifyNewTask").Maybe() + s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.mockCtrl) + s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes() s.mockTimerProcessor = &MockTimerQueueProcessor{} s.mockTimerProcessor.On("NotifyNewTimers", mock.Anything, mock.Anything).Maybe() @@ -181,6 +184,7 @@ func (s *resetorSuite) SetupTest() { } func (s *resetorSuite) TearDownTest() { + s.mockCtrl.Finish() s.mockMatchingClient.AssertExpectations(s.T()) s.mockExecutionMgr.AssertExpectations(s.T()) s.mockHistoryMgr.AssertExpectations(s.T()) @@ -192,7 +196,6 @@ func (s *resetorSuite) TearDownTest() { 
s.mockArchivalClient.AssertExpectations(s.T()) s.mockEventsCache.AssertExpectations(s.T()) s.mockTxProcessor.AssertExpectations(s.T()) - s.mockReplicationProcessor.AssertExpectations(s.T()) s.mockTimerProcessor.AssertExpectations(s.T()) } diff --git a/tools/cassandra/updateTask_test.go b/tools/cassandra/updateTask_test.go index 77a736dd220..35841c94e7b 100644 --- a/tools/cassandra/updateTask_test.go +++ b/tools/cassandra/updateTask_test.go @@ -60,5 +60,5 @@ func (s *UpdateSchemaTestSuite) TestDryrun() { s.Nil(err) defer client.Close() dir := "../../schema/cassandra/cadence/versioned" - s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, "0.21") + s.RunDryrunTest(buildCLIOptions(), client, "-k", dir, "0.22") }