AMBARI-14046. Refactor code that breaks transaction chaining. (mpapirkovskyy)
Myroslav Papirkovskyy committed Nov 24, 2015
1 parent dffd776 commit 59c8885
Showing 17 changed files with 22 additions and 23 deletions.
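
Every changed file follows the same pattern: a method annotated with @Transactional is widened from private to package-private (default) visibility. Ambari's @Transactional is applied through Guice AOP method interception (guice-persist), and Guice can only intercept methods it is able to override in a generated subclass. On a private method the annotation is therefore silently ignored: the method neither starts its own transaction nor joins the caller's, which is the broken transaction chaining the commit title refers to. The sketch below is not Ambari code; ExampleService, mergeAll, and the injected EntityManager provider are hypothetical names used only to illustrate the before/after shape of the change.

import javax.persistence.EntityManager;

import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.persist.Transactional;

// Minimal sketch of the pattern; ExampleService and mergeAll are hypothetical names.
public class ExampleService {

  @Inject
  private Provider<EntityManager> entityManagerProvider;

  // Before the refactor the method was private:
  //
  //   @Transactional
  //   private void mergeAll(Iterable<?> entities) { ... }
  //
  // Guice AOP intercepts by overriding methods in a generated subclass, and private
  // methods cannot be overridden, so @Transactional had no effect and each merge ran
  // outside any transaction.

  // After the refactor: package-private visibility lets the Guice-generated subclass
  // wrap the call, so the merges commit or roll back as a unit, and a @Transactional
  // caller further up the stack chains into the same transaction.
  @Transactional
  void mergeAll(Iterable<?> entities) {
    EntityManager entityManager = entityManagerProvider.get();
    for (Object entity : entities) {
      entityManager.merge(entity);
    }
  }
}

One hunk differs from the rest: in the @@ -458,13 +458,12 @@ hunk, @Transactional is simply removed from the overridden public delete() method, which accounts for the diff having 23 deletions against 22 additions.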
@@ -4326,7 +4326,7 @@ private WidgetEntity addIfNotExistsWidgetEntity(WidgetLayoutInfo layoutInfo, Clu
}

@Transactional
- private void createWidgetsAndLayouts(Cluster cluster, List<WidgetLayout> widgetLayouts) {
+ void createWidgetsAndLayouts(Cluster cluster, List<WidgetLayout> widgetLayouts) {
String user = "ambari";
Long clusterId = cluster.getClusterId();
ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
@@ -1050,7 +1050,7 @@ private void validateKDCCredentials(KerberosDetails kerberosDetails, Cluster clu
* Kerberos-specific configuration details
*/
@Transactional
- private RequestStageContainer handle(Cluster cluster,
+ RequestStageContainer handle(Cluster cluster,
KerberosDetails kerberosDetails,
Map<String, ? extends Collection<String>> serviceComponentFilter,
Set<String> hostFilter, Collection<String> identityFilter,
@@ -268,7 +268,7 @@ private Long getClusterIdByName(String clusterName) {
* @param toCreateHistoryAndMerge - create new history, merge alert
*/
@Transactional
- private void saveEntities(Map<Alert, AlertCurrentEntity> toCreate,
+ void saveEntities(Map<Alert, AlertCurrentEntity> toCreate,
Map<Alert, AlertCurrentEntity> toMerge,
Map<Alert, AlertCurrentEntity> toCreateHistoryAndMerge) {
for (Map.Entry<Alert, AlertCurrentEntity> entry : toCreate.entrySet()) {
@@ -564,7 +564,7 @@ public void refresh() {
* has already been acquired from {@link #readWriteLock}.
*/
@Transactional
- private void saveIfPersisted() {
+ void saveIfPersisted() {
if (isPersisted()) {
serviceComponentDesiredStateDAO.merge(desiredStateEntity);
}
@@ -506,7 +506,7 @@ protected void persistEntities() {
}

@Transactional
- private void saveIfPersisted() {
+ void saveIfPersisted() {
if (isPersisted()) {
clusterServiceDAO.merge(serviceEntity);
serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
@@ -180,7 +180,7 @@ private void checkLoaded() {
}

@Transactional
- private void loadClustersAndHosts() {
+ void loadClustersAndHosts() {
for (ClusterEntity clusterEntity : clusterDAO.findAll()) {
Cluster currentCluster = clusterFactory.create(clusterEntity);
clusters.put(clusterEntity.getClusterName(), currentCluster);
@@ -588,7 +588,7 @@ public void mapHostToCluster(String hostname, String clusterName)
}

@Transactional
- private void mapHostClusterEntities(String hostName, Long clusterId) {
+ void mapHostClusterEntities(String hostName, Long clusterId) {
HostEntity hostEntity = hostDAO.findByName(hostName);
ClusterEntity clusterEntity = clusterDAO.findById(clusterId);

@@ -458,13 +458,12 @@ void saveIfPersisted() {
}

@Transactional
- private void save(ClusterEntity clusterEntity) {
+ void save(ClusterEntity clusterEntity) {
persistHostMapping();
persistConfigMapping(clusterEntity);
}

@Override
- @Transactional
public void delete() {
cluster.getClusterGlobalLock().writeLock().lock();
try {
@@ -247,7 +247,7 @@ public String getDescription() {
* Persist @RequestScheduleEntity with @RequestScheduleBatchHostEntity
*/
@Transactional
- private void persistEntities() {
+ void persistEntities() {
ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
requestScheduleEntity.setClusterEntity(clusterEntity);
requestScheduleEntity.setCreateTimestamp(System.currentTimeMillis());
@@ -258,7 +258,7 @@ private void persistEntities() {
}

@Transactional
- private void persistRequestMapping() {
+ void persistRequestMapping() {
// Delete existing mappings to support updates
if (isPersisted) {
batchRequestDAO.removeByScheduleId(requestScheduleEntity.getScheduleId());
@@ -293,7 +293,7 @@ private void persistRequestMapping() {
}

@Transactional
- private void saveIfPersisted() {
+ void saveIfPersisted() {
if (isPersisted) {
requestScheduleEntity.setUpdateTimestamp(System.currentTimeMillis());
// Update the Entity object with new settings
@@ -1466,7 +1466,7 @@ public void refresh() {
* from {@link #readWriteLock}.
*/
@Transactional
- private void saveIfPersisted() {
+ void saveIfPersisted() {
if (isPersisted()) {
hostComponentStateDAO.merge(stateEntity);
hostComponentDesiredStateDAO.merge(desiredStateEntity);
@@ -1705,7 +1705,7 @@ public void setRestartRequired(boolean restartRequired) {
}

@Transactional
- private RepositoryVersionEntity createRepositoryVersion(String version, final StackId stackId, final StackInfo stackInfo) throws AmbariException {
+ RepositoryVersionEntity createRepositoryVersion(String version, final StackId stackId, final StackInfo stackInfo) throws AmbariException {
// During an Ambari Upgrade from 1.7.0 -> 2.0.0, the Repo Version will not exist, so bootstrap it.
LOG.info("Creating new repository version " + stackId.getStackName() + "-" + version);

@@ -71,7 +71,7 @@ private void stopPersistenceService() {
* @throws SQLException
*/
@Transactional
- private void updateMetaInfo(Map<String, String> data) throws SQLException {
+ void updateMetaInfo(Map<String, String> data) throws SQLException {
if (data != null && !data.isEmpty()) {
for (Map.Entry<String, String> entry : data.entrySet()) {
MetainfoEntity metainfoEntity = metainfoDAO.findByKey(entry.getKey());
@@ -840,7 +840,7 @@ protected void initializeClusterAndServiceWidgets() throws AmbariException {
* @throws SQLException
*/
@Transactional
- private Long populateHostsId(ResultSet resultSet) throws SQLException {
+ Long populateHostsId(ResultSet resultSet) throws SQLException {
Long hostId = 0L;
if (resultSet != null) {
try {
@@ -1355,7 +1355,7 @@ private void syncViewInstance(ViewInstanceEntity instance1, ViewInstanceEntity i

// create an admin resource for the given view instance entity and merge it
@Transactional
- private ViewInstanceEntity mergeViewInstance(ViewInstanceEntity instanceEntity, ResourceTypeEntity resourceTypeEntity) {
+ ViewInstanceEntity mergeViewInstance(ViewInstanceEntity instanceEntity, ResourceTypeEntity resourceTypeEntity) {
// create an admin resource to represent this view instance
instanceEntity.setResource(createViewInstanceResource(resourceTypeEntity));

@@ -1610,7 +1610,7 @@ private boolean checkViewVersion(ViewEntity view, String version, String serverV

// persist the given view and its instances
@Transactional
- private void persistView(ViewEntity viewDefinition, Set<ViewInstanceEntity> instanceDefinitions) throws Exception {
+ void persistView(ViewEntity viewDefinition, Set<ViewInstanceEntity> instanceDefinitions) throws Exception {
// ensure that the view entity matches the db
syncView(viewDefinition, instanceDefinitions);

@@ -163,7 +163,7 @@ public void configure(Binder binder) {
}

@Transactional
- private RequestExecution createRequestExecution(boolean addSchedule)
+ RequestExecution createRequestExecution(boolean addSchedule)
throws Exception {
Batch batches = new Batch();
Schedule schedule = new Schedule();
@@ -85,7 +85,7 @@ public void teardown() throws AmbariException {
}

@Transactional
- private ConfigGroup createConfigGroup() throws AmbariException {
+ ConfigGroup createConfigGroup() throws AmbariException {
// Create config without persisting and save group
Map<String, String> properties = new HashMap<String, String>();
properties.put("a", "b");
@@ -192,7 +192,7 @@ public void tearDown() {
}

@Transactional
- private Long addConfigGroup(String name, String tag, List<String> hosts,
+ Long addConfigGroup(String name, String tag, List<String> hosts,
List<Config> configs) throws AmbariException {

Map<Long, Host> hostMap = new HashMap<Long, Host>();
@@ -85,7 +85,7 @@ public void teardown() throws Exception {
}

@Transactional
- private RequestExecution createRequestSchedule() throws Exception {
+ RequestExecution createRequestSchedule() throws Exception {
Batch batches = new Batch();
Schedule schedule = new Schedule();

@@ -305,7 +305,7 @@ private void run() throws Exception {

// Creates default users and roles if in-memory database is used
@Transactional
- private void addInMemoryUsers() {
+ void addInMemoryUsers() {
if (getPersistenceType(configuration) == PersistenceType.IN_MEMORY &&
configuration.getApiAuthentication()) {
LOG.info("In-memory database is used - creating default users");