diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
index ab2d9a454e1c..7cb47ce1a46c 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java
@@ -348,7 +348,6 @@ private HashMap> pruneNodes(ClientPartitionAdvi
       logger.debug("ClientMetadataService: The server to buckets map is : {}",
           serverToBucketsMap);
     }
-    HashSet currentBucketSet = new HashSet();
     ServerLocation randomFirstServer = null;
     if (serverToBucketsMap.isEmpty()) {
       return null;
@@ -363,7 +362,7 @@ private HashMap> pruneNodes(ClientPartitionAdvi
           "ClientMetadataService: Adding the server : {} which is random and buckets {} to prunedMap",
           randomFirstServer, bucketSet);
     }
-    currentBucketSet.addAll(bucketSet);
+    HashSet currentBucketSet = new HashSet(bucketSet);
     prunedServerToBucketsMap.put(randomFirstServer, bucketSet);
     serverToBucketsMap.remove(randomFirstServer);
@@ -405,8 +404,7 @@ private ServerLocation findNextServer(Set
     nodesOfEqualSize = new ArrayList();
     for (Map.Entry> entry : entrySet) {
-      HashSet buckets = new HashSet();
-      buckets.addAll(entry.getValue());
+      HashSet buckets = new HashSet(entry.getValue());
       buckets.removeAll(currentBucketSet);
       if (max < buckets.size()) {
diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java
index 33a0281c4bf0..7378e9f324d6 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java
@@ -1272,8 +1272,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults(
       // intermediate resultset Identify the final List which will depend upon the complete
       // expansion flag Identify the iterators to be expanded to, which will also depend upon
       // complete expansion flag..
-      List totalExpList = new ArrayList();
-      totalExpList.addAll(singleUsableICH.expansionList);
+      List totalExpList = new ArrayList(singleUsableICH.expansionList);
       if (completeExpansionNeeded) {
         Support.Assert(expnItrsToIgnore != null,
             "expnItrsToIgnore should not have been null as we are in this block itself indicates that intermediate results was not null");
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
index fac58c10a904..cbe426af8b89 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
@@ -118,7 +118,6 @@ public static PartitionedRegion getColocatedRegion(final PartitionedRegion parti
    */
   public static boolean checkMembersColocation(PartitionedRegion partitionedRegion,
       InternalDistributedMember member) {
-    List colocatedRegions = new ArrayList();
     List tempcolocatedRegions = new ArrayList();
     Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache());
     PartitionRegionConfig regionConfig =
@@ -128,7 +127,8 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion
       return false;
     }
     tempcolocatedRegions.add(regionConfig);
-    colocatedRegions.addAll(tempcolocatedRegions);
+    List colocatedRegions =
+        new ArrayList(tempcolocatedRegions);
     PartitionRegionConfig prConf = null;
     do {
       PartitionRegionConfig tempToBeColocatedWith = tempcolocatedRegions.remove(0);
@@ -293,8 +293,8 @@ public static Map getAllColocationRegions(
     Map colocatedRegions = new HashMap();
     List colocatedByRegion = partitionedRegion.getColocatedByList();
     if (colocatedByRegion.size() != 0) {
-      List tempcolocatedRegions = new ArrayList();
-      tempcolocatedRegions.addAll(colocatedByRegion);
+      List tempcolocatedRegions =
+          new ArrayList(colocatedByRegion);
       do {
         PartitionedRegion pRegion = tempcolocatedRegions.remove(0);
         pRegion.waitOnBucketMetadataInitialization();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 5637978e4eed..c698c926463b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -8168,8 +8168,7 @@ public void txDecRefCount(RegionEntry regionEntry) {
    * Does not throw RegionDestroyedException even if destroyed
    */
   List debugGetSubregionNames() {
-    List names = new ArrayList();
-    names.addAll(subregions.keySet());
+    List names = new ArrayList(subregions.keySet());
     return names;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 075e3d0deec2..69c1c96cf22f 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -6636,8 +6636,7 @@ public int entryCount(Set buckets, boolean estimate) {
     Map bucketSizes = null;
     if (buckets != null) {
       if (this.dataStore != null) {
-        List list = new ArrayList();
-        list.addAll(buckets);
+        List list = new ArrayList(buckets);
         bucketSizes = this.dataStore.getSizeLocallyForBuckets(list);
       }
     } else {
@@ -9384,11 +9383,10 @@ public List getSortedBuckets() {
           HeapEvictor.BUCKET_SORTING_INTERVAL);
       }
     }
-    List bucketList = new ArrayList<>();
     if (!bucketSortedOnce.get()) {
       while (bucketSortedOnce.get() == false);
     }
-    bucketList.addAll(this.sortedBuckets);
+    List bucketList = new ArrayList<>(this.sortedBuckets);
     return bucketList;
   }
@@ -10038,8 +10036,7 @@ public void shadowPRWaitForBucketRecovery() {
     // and primary nodes have been decided.
     // This is required in case of persistent PR and sender.
     Set allBuckets = userPR.getDataStore().getAllLocalBucketIds();
-    Set allBucketsClone = new HashSet();
-    allBucketsClone.addAll(allBuckets);
+    Set allBucketsClone = new HashSet(allBuckets);
     while (allBucketsClone.size() != 0) {
       logger.debug(
           "Need to wait until partitionedRegionQueue <<{}>> is loaded with all the buckets",
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
index 59b634e12bb6..3cbefc537ad2 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
@@ -150,8 +150,7 @@ private static InternalDistributedMember findNextNode(
     ArrayList nodesOfEqualSize = new ArrayList();
     for (Map.Entry> entry : entrySet) {
-      HashSet buckets = new HashSet();
-      buckets.addAll(entry.getValue());
+      HashSet buckets = new HashSet(entry.getValue());
       buckets.removeAll(currentBucketSet);
       if (max < buckets.size()) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CommandInitializer.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CommandInitializer.java
index 10cef2fafe98..f03b0dde2749 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CommandInitializer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/CommandInitializer.java
@@ -199,8 +199,8 @@ private static Map> initializeAllCommands() {
     gfe58Commands.put(MessageType.EXECUTE_FUNCTION, ExecuteFunction.getCommand());
     // Initialize the GFE 6.0.3 commands map
-    Map gfe603Commands = new HashMap();
-    gfe603Commands.putAll(allCommands.get(Version.GFE_58));
+    Map gfe603Commands =
+        new HashMap(allCommands.get(Version.GFE_58));
     allCommands.put(Version.GFE_603, gfe603Commands);
     // Initialize the GFE 6.1 commands
@@ -299,16 +299,15 @@ private static Map> initializeAllCommands() {
     allCommands.put(Version.GFE_7099, gfe70Commands);
     allCommands.put(Version.GFE_71, gfe70Commands);
-    Map gfe80Commands = new HashMap();
-    gfe80Commands.putAll(allCommands.get(Version.GFE_71));
+    Map gfe80Commands =
+        new HashMap(allCommands.get(Version.GFE_71));
     allCommands.put(Version.GFE_80, gfe80Commands);
     // PutAll is changed to chunk responses back to the client
     gfe80Commands.put(MessageType.PUTALL, PutAll80.getCommand());
     allCommands.put(Version.GFE_8009, gfe80Commands);
-    Map gfe81Commands = new HashMap();
-    gfe81Commands.putAll(gfe80Commands);
+    Map gfe81Commands = new HashMap(gfe80Commands);
     gfe81Commands.put(MessageType.GET_ALL_WITH_CALLBACK, GetAllWithCallback.getCommand());
     gfe81Commands.put(MessageType.PUT_ALL_WITH_CALLBACK, PutAllWithCallback.getCommand());
     gfe81Commands.put(MessageType.REMOVE_ALL, RemoveAll.getCommand());
@@ -316,8 +315,7 @@ private static Map> initializeAllCommands() {
     allCommands.put(Version.GFE_81, gfe81Commands);
     allCommands.put(Version.GFE_82, gfe81Commands);
-    Map commands = new HashMap();
-    commands.putAll(allCommands.get(Version.GFE_82));
+    Map commands = new HashMap(allCommands.get(Version.GFE_82));
     allCommands.put(Version.GFE_90, commands);
     commands.put(MessageType.QUERY_WITH_PARAMETERS, QueryWithParametersGeode10.getCommand());
     commands.put(MessageType.QUERY, QueryGeode10.getCommand());
@@ -331,8 +329,8 @@ private static Map> initializeAllCommands() {
     allCommands.put(Version.GEODE_1_6_0, commands);
     allCommands.put(Version.GEODE_1_7_0, commands);
-    Map geode18Commands = new HashMap();
-    geode18Commands.putAll(allCommands.get(Version.GEODE_1_7_0));
+    Map geode18Commands =
+        new HashMap(allCommands.get(Version.GEODE_1_7_0));
     geode18Commands.put(MessageType.EXECUTE_REGION_FUNCTION,
         ExecuteRegionFunctionGeode18.getCommand());
     allCommands.put(Version.GEODE_1_8_0, geode18Commands);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java
index e8ecdf48f209..cc49c62114b0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java
@@ -510,9 +510,8 @@ protected void processQueue() {
         beforeExecute();
         try {
           // this list is access by ack reader thread so create new every time. #50220
-          filteredList = new ArrayList();
-          filteredList.addAll(events);
+          filteredList = new ArrayList(events);
           // If the exception has been set and its cause is an IllegalStateExcetption,
           // remove all events whose serialized value is no longer available
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
index 6bb0020dbd93..d66f395a3486 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java
@@ -1684,8 +1684,7 @@ public void run() {
           }
         }
-        final HashMap> temp =
-            new HashMap>();
+        final HashMap> temp;
         buckToDispatchLock.lock();
         try {
           boolean wasEmpty = regionToDispatchedKeysMap.isEmpty();
@@ -1695,7 +1694,7 @@ public void run() {
             if (wasEmpty)
               continue;
             // TODO: This should be optimized.
-            temp.putAll(regionToDispatchedKeysMap);
+            temp = new HashMap>(regionToDispatchedKeysMap);
             regionToDispatchedKeysMap.clear();
           } finally {
             buckToDispatchLock.unlock();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/datasource/AbstractPoolCache.java b/geode-core/src/main/java/org/apache/geode/internal/datasource/AbstractPoolCache.java
index 89e8a99e6541..de611a465f2d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/datasource/AbstractPoolCache.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/datasource/AbstractPoolCache.java
@@ -404,9 +404,9 @@ protected void cleanUp() {
     // Asif : Create a temp list which copies the connections in
     // the expired list & releases the lock on expired list
    // immediately . Then it is safe to clear the expiredConnList
-    List temp = new ArrayList();
+    List temp;
     synchronized (this.expiredConns) {
-      temp.addAll(this.expiredConns);
+      temp = new ArrayList(this.expiredConns);
       this.expiredConns.clear();
     }
     // Asif: destroy the connections contained in the temp list
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java b/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
index 6f1370627739..4f410866f739 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/beans/DistributedSystemBridge.java
@@ -1012,8 +1012,7 @@ public ObjectName[] fetchRegionObjectNames(ObjectName memberMBeanName) throws Ex
   }

   public ObjectName[] listDistributedRegionObjectNames() {
-    List list = new ArrayList<>();
-    list.addAll(distrRegionMap.keySet());
+    List list = new ArrayList<>(distrRegionMap.keySet());
     ObjectName[] objNames = new ObjectName[list.size()];
     return list.toArray(objNames);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DestroyRegionCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DestroyRegionCommand.java
index 52c3417d0527..7e23195b70cb 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DestroyRegionCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DestroyRegionCommand.java
@@ -109,8 +109,7 @@ void checkForJDBCMapping(String regionPath) {
       return;
     }
-    Set groupNames = new HashSet();
-    groupNames.addAll(ccService.getGroups());
+    Set groupNames = new HashSet(ccService.getGroups());
     groupNames.add("cluster");
     for (String groupName : groupNames) {
       CacheConfig cacheConfig = ccService.getCacheConfig(groupName);
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ListRegionCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ListRegionCommand.java
index d0bf4bc260fb..cff867059826 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ListRegionCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ListRegionCommand.java
@@ -58,7 +58,6 @@ public ResultModel listRegion(
           help = CliStrings.LIST_REGION__MEMBER__HELP) String[] memberNameOrId) {
     ResultModel result = new ResultModel();
-    Set regionInfoSet = new LinkedHashSet<>();
     ResultCollector rc;
     Set targetMembers = findMembers(group, memberNameOrId);
@@ -72,10 +71,11 @@ public ResultModel listRegion(
       if (resultList != null) {
         // Gather all RegionInformation into a flat set.
-        regionInfoSet.addAll(resultList.stream().filter(Objects::nonNull)
-            .filter(Object[].class::isInstance).map(Object[].class::cast).flatMap(Arrays::stream)
-            .filter(RegionInformation.class::isInstance).map(RegionInformation.class::cast)
-            .collect(Collectors.toSet()));
+        Set regionInfoSet =
+            new LinkedHashSet<>(resultList.stream().filter(Objects::nonNull)
+                .filter(Object[].class::isInstance).map(Object[].class::cast).flatMap(Arrays::stream)
+                .filter(RegionInformation.class::isInstance).map(RegionInformation.class::cast)
+                .collect(Collectors.toSet()));
         Set regionNames = new TreeSet<>();
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/model/TabularResultModel.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/model/TabularResultModel.java
index 47e2d19e34a5..def76867be75 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/model/TabularResultModel.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/model/TabularResultModel.java
@@ -75,8 +75,7 @@ public void addRow(String... values) {
   @JsonIgnore
   public List getHeaders() {
     // this should maintain the original insertion order
-    List headers = new ArrayList<>();
-    headers.addAll(table.keySet());
+    List headers = new ArrayList<>(table.keySet());
     return headers;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
index 9db81a5d8b89..fa3c7e4f2fd5 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/Gfsh.java
@@ -999,8 +999,7 @@ public String getEnvAppContextPath() {
   }

   public Map getEnv() {
-    Map map = new TreeMap<>();
-    map.putAll(env);
+    Map map = new TreeMap<>(env);
     return map;
   }
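
Reviewer note: every hunk above applies the same refactoring, so a minimal standalone sketch of the pattern may help when reading the diff. The class and method names below are hypothetical (this is not Geode code); it shows the copy constructor replacing the create-then-addAll/putAll sequence, plus the lock-guarded variant used in AbstractPoolCache.cleanUp() and ParallelGatewaySenderQueue, where the target is declared outside the critical section and the copy is taken while the lock is held.

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of the pattern applied by this patch, not Geode code.
public class CopyConstructorSketch {

  // Before: create an empty collection, then copy the source into it.
  static List<String> copyBefore(List<String> source) {
    List<String> target = new ArrayList<>();
    target.addAll(source);
    return target;
  }

  // After: a single copy-constructor call; the collection is sized up front
  // and can be declared at its first real use.
  static List<String> copyAfter(List<String> source) {
    return new ArrayList<>(source);
  }

  // Lock-guarded variant: declare the target outside the critical section,
  // take the snapshot while holding the lock, then use the copy lock-free.
  private final Object lock = new Object();
  private final List<String> expired = new ArrayList<>();

  List<String> drainExpired() {
    List<String> temp;
    synchronized (lock) {
      temp = new ArrayList<>(expired); // snapshot taken under the lock
      expired.clear();
    }
    return temp; // callers iterate the snapshot without holding the lock
  }
}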