Skip to content

Commit

Permalink
GEODE-7218: Redundant addAll call.
Browse files Browse the repository at this point in the history
	* Use the collection-accepting copy constructor to supply the initial contents, rather than creating an empty collection and calling addAll afterwards.
  • Loading branch information
nabarunnag committed Sep 19, 2019
1 parent 6bae15a commit ddd7304
Show file tree
Hide file tree
Showing 15 changed files with 34 additions and 50 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -348,7 +348,6 @@ private HashMap<ServerLocation, HashSet<Integer>> pruneNodes(ClientPartitionAdvi
logger.debug("ClientMetadataService: The server to buckets map is : {}", serverToBucketsMap);
}

HashSet<Integer> currentBucketSet = new HashSet<Integer>();
ServerLocation randomFirstServer = null;
if (serverToBucketsMap.isEmpty()) {
return null;
Expand All @@ -363,7 +362,7 @@ private HashMap<ServerLocation, HashSet<Integer>> pruneNodes(ClientPartitionAdvi
"ClientMetadataService: Adding the server : {} which is random and buckets {} to prunedMap",
randomFirstServer, bucketSet);
}
currentBucketSet.addAll(bucketSet);
HashSet<Integer> currentBucketSet = new HashSet<Integer>(bucketSet);
prunedServerToBucketsMap.put(randomFirstServer, bucketSet);
serverToBucketsMap.remove(randomFirstServer);

Expand Down Expand Up @@ -405,8 +404,7 @@ private ServerLocation findNextServer(Set<Map.Entry<ServerLocation, HashSet<Inte
int max = -1;
ArrayList<ServerLocation> nodesOfEqualSize = new ArrayList<ServerLocation>();
for (Map.Entry<ServerLocation, HashSet<Integer>> entry : entrySet) {
HashSet<Integer> buckets = new HashSet<Integer>();
buckets.addAll(entry.getValue());
HashSet<Integer> buckets = new HashSet<Integer>(entry.getValue());
buckets.removeAll(currentBucketSet);

if (max < buckets.size()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1272,8 +1272,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults(
// intermediate resultset Identify the final List which will depend upon the complete
// expansion flag Identify the iterators to be expanded to, which will also depend upon
// complete expansion flag..
List totalExpList = new ArrayList();
totalExpList.addAll(singleUsableICH.expansionList);
List totalExpList = new ArrayList(singleUsableICH.expansionList);
if (completeExpansionNeeded) {
Support.Assert(expnItrsToIgnore != null,
"expnItrsToIgnore should not have been null as we are in this block itself indicates that intermediate results was not null");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,6 @@ public static PartitionedRegion getColocatedRegion(final PartitionedRegion parti
*/
public static boolean checkMembersColocation(PartitionedRegion partitionedRegion,
InternalDistributedMember member) {
List<PartitionRegionConfig> colocatedRegions = new ArrayList<PartitionRegionConfig>();
List<PartitionRegionConfig> tempcolocatedRegions = new ArrayList<PartitionRegionConfig>();
Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache());
PartitionRegionConfig regionConfig =
Expand All @@ -128,7 +127,8 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion
return false;
}
tempcolocatedRegions.add(regionConfig);
colocatedRegions.addAll(tempcolocatedRegions);
List<PartitionRegionConfig> colocatedRegions =
new ArrayList<PartitionRegionConfig>(tempcolocatedRegions);
PartitionRegionConfig prConf = null;
do {
PartitionRegionConfig tempToBeColocatedWith = tempcolocatedRegions.remove(0);
Expand Down Expand Up @@ -293,8 +293,8 @@ public static Map<String, PartitionedRegion> getAllColocationRegions(
Map<String, PartitionedRegion> colocatedRegions = new HashMap<String, PartitionedRegion>();
List<PartitionedRegion> colocatedByRegion = partitionedRegion.getColocatedByList();
if (colocatedByRegion.size() != 0) {
List<PartitionedRegion> tempcolocatedRegions = new ArrayList<PartitionedRegion>();
tempcolocatedRegions.addAll(colocatedByRegion);
List<PartitionedRegion> tempcolocatedRegions =
new ArrayList<PartitionedRegion>(colocatedByRegion);
do {
PartitionedRegion pRegion = tempcolocatedRegions.remove(0);
pRegion.waitOnBucketMetadataInitialization();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8168,8 +8168,7 @@ public void txDecRefCount(RegionEntry regionEntry) {
* Does not throw RegionDestroyedException even if destroyed
*/
List debugGetSubregionNames() {
List names = new ArrayList();
names.addAll(subregions.keySet());
List names = new ArrayList(subregions.keySet());
return names;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6636,8 +6636,7 @@ public int entryCount(Set<Integer> buckets, boolean estimate) {
Map<Integer, SizeEntry> bucketSizes = null;
if (buckets != null) {
if (this.dataStore != null) {
List<Integer> list = new ArrayList<Integer>();
list.addAll(buckets);
List<Integer> list = new ArrayList<Integer>(buckets);
bucketSizes = this.dataStore.getSizeLocallyForBuckets(list);
}
} else {
Expand Down Expand Up @@ -9384,11 +9383,10 @@ public List<BucketRegion> getSortedBuckets() {
HeapEvictor.BUCKET_SORTING_INTERVAL);
}
}
List<BucketRegion> bucketList = new ArrayList<>();
if (!bucketSortedOnce.get()) {
while (bucketSortedOnce.get() == false);
}
bucketList.addAll(this.sortedBuckets);
List<BucketRegion> bucketList = new ArrayList<>(this.sortedBuckets);
return bucketList;
}

Expand Down Expand Up @@ -10038,8 +10036,7 @@ public void shadowPRWaitForBucketRecovery() {
// and primary nodes have been decided.
// This is required in case of persistent PR and sender.
Set<Integer> allBuckets = userPR.getDataStore().getAllLocalBucketIds();
Set<Integer> allBucketsClone = new HashSet<Integer>();
allBucketsClone.addAll(allBuckets);
Set<Integer> allBucketsClone = new HashSet<Integer>(allBuckets);
while (allBucketsClone.size() != 0) {
logger.debug(
"Need to wait until partitionedRegionQueue <<{}>> is loaded with all the buckets",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,7 @@ private static InternalDistributedMember findNextNode(
ArrayList<InternalDistributedMember> nodesOfEqualSize =
new ArrayList<InternalDistributedMember>();
for (Map.Entry<InternalDistributedMember, HashSet<Integer>> entry : entrySet) {
HashSet<Integer> buckets = new HashSet<Integer>();
buckets.addAll(entry.getValue());
HashSet<Integer> buckets = new HashSet<Integer>(entry.getValue());
buckets.removeAll(currentBucketSet);

if (max < buckets.size()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -199,8 +199,8 @@ private static Map<Version, Map<Integer, Command>> initializeAllCommands() {
gfe58Commands.put(MessageType.EXECUTE_FUNCTION, ExecuteFunction.getCommand());

// Initialize the GFE 6.0.3 commands map
Map<Integer, Command> gfe603Commands = new HashMap<Integer, Command>();
gfe603Commands.putAll(allCommands.get(Version.GFE_58));
Map<Integer, Command> gfe603Commands =
new HashMap<Integer, Command>(allCommands.get(Version.GFE_58));
allCommands.put(Version.GFE_603, gfe603Commands);

// Initialize the GFE 6.1 commands
Expand Down Expand Up @@ -299,25 +299,23 @@ private static Map<Version, Map<Integer, Command>> initializeAllCommands() {
allCommands.put(Version.GFE_7099, gfe70Commands);
allCommands.put(Version.GFE_71, gfe70Commands);

Map<Integer, Command> gfe80Commands = new HashMap<Integer, Command>();
gfe80Commands.putAll(allCommands.get(Version.GFE_71));
Map<Integer, Command> gfe80Commands =
new HashMap<Integer, Command>(allCommands.get(Version.GFE_71));
allCommands.put(Version.GFE_80, gfe80Commands);
// PutAll is changed to chunk responses back to the client
gfe80Commands.put(MessageType.PUTALL, PutAll80.getCommand());

allCommands.put(Version.GFE_8009, gfe80Commands);

Map<Integer, Command> gfe81Commands = new HashMap<Integer, Command>();
gfe81Commands.putAll(gfe80Commands);
Map<Integer, Command> gfe81Commands = new HashMap<Integer, Command>(gfe80Commands);
gfe81Commands.put(MessageType.GET_ALL_WITH_CALLBACK, GetAllWithCallback.getCommand());
gfe81Commands.put(MessageType.PUT_ALL_WITH_CALLBACK, PutAllWithCallback.getCommand());
gfe81Commands.put(MessageType.REMOVE_ALL, RemoveAll.getCommand());

allCommands.put(Version.GFE_81, gfe81Commands);
allCommands.put(Version.GFE_82, gfe81Commands);

Map<Integer, Command> commands = new HashMap<Integer, Command>();
commands.putAll(allCommands.get(Version.GFE_82));
Map<Integer, Command> commands = new HashMap<Integer, Command>(allCommands.get(Version.GFE_82));
allCommands.put(Version.GFE_90, commands);
commands.put(MessageType.QUERY_WITH_PARAMETERS, QueryWithParametersGeode10.getCommand());
commands.put(MessageType.QUERY, QueryGeode10.getCommand());
Expand All @@ -331,8 +329,8 @@ private static Map<Version, Map<Integer, Command>> initializeAllCommands() {
allCommands.put(Version.GEODE_1_6_0, commands);
allCommands.put(Version.GEODE_1_7_0, commands);

Map<Integer, Command> geode18Commands = new HashMap<Integer, Command>();
geode18Commands.putAll(allCommands.get(Version.GEODE_1_7_0));
Map<Integer, Command> geode18Commands =
new HashMap<Integer, Command>(allCommands.get(Version.GEODE_1_7_0));
geode18Commands.put(MessageType.EXECUTE_REGION_FUNCTION,
ExecuteRegionFunctionGeode18.getCommand());
allCommands.put(Version.GEODE_1_8_0, geode18Commands);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -510,9 +510,8 @@ protected void processQueue() {
beforeExecute();
try {
// this list is access by ack reader thread so create new every time. #50220
filteredList = new ArrayList<GatewaySenderEventImpl>();

filteredList.addAll(events);
filteredList = new ArrayList<GatewaySenderEventImpl>(events);

// If the exception has been set and its cause is an IllegalStateExcetption,
// remove all events whose serialized value is no longer available
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1684,8 +1684,7 @@ public void run() {
}
}

final HashMap<String, Map<Integer, List>> temp =
new HashMap<String, Map<Integer, List>>();
final HashMap<String, Map<Integer, List>> temp;
buckToDispatchLock.lock();
try {
boolean wasEmpty = regionToDispatchedKeysMap.isEmpty();
Expand All @@ -1695,7 +1694,7 @@ public void run() {
if (wasEmpty)
continue;
// TODO: This should be optimized.
temp.putAll(regionToDispatchedKeysMap);
temp = new HashMap<String, Map<Integer, List>>(regionToDispatchedKeysMap);
regionToDispatchedKeysMap.clear();
} finally {
buckToDispatchLock.unlock();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -404,9 +404,9 @@ protected void cleanUp() {
// Asif : Create a temp list which copies the connections in
// the expired list & releases the lock on expired list
// immediately . Then it is safe to clear the expiredConnList
List temp = new ArrayList();
List temp;
synchronized (this.expiredConns) {
temp.addAll(this.expiredConns);
temp = new ArrayList(this.expiredConns);
this.expiredConns.clear();
}
// Asif: destroy the connections contained in the temp list
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1012,8 +1012,7 @@ public ObjectName[] fetchRegionObjectNames(ObjectName memberMBeanName) throws Ex
}

public ObjectName[] listDistributedRegionObjectNames() {
List<ObjectName> list = new ArrayList<>();
list.addAll(distrRegionMap.keySet());
List<ObjectName> list = new ArrayList<>(distrRegionMap.keySet());
ObjectName[] objNames = new ObjectName[list.size()];
return list.toArray(objNames);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,8 +109,7 @@ void checkForJDBCMapping(String regionPath) {
return;
}

Set<String> groupNames = new HashSet<String>();
groupNames.addAll(ccService.getGroups());
Set<String> groupNames = new HashSet<String>(ccService.getGroups());
groupNames.add("cluster");
for (String groupName : groupNames) {
CacheConfig cacheConfig = ccService.getCacheConfig(groupName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ public ResultModel listRegion(
help = CliStrings.LIST_REGION__MEMBER__HELP) String[] memberNameOrId) {
ResultModel result = new ResultModel();

Set<RegionInformation> regionInfoSet = new LinkedHashSet<>();
ResultCollector<?, ?> rc;
Set<DistributedMember> targetMembers = findMembers(group, memberNameOrId);

Expand All @@ -72,10 +71,11 @@ public ResultModel listRegion(

if (resultList != null) {
// Gather all RegionInformation into a flat set.
regionInfoSet.addAll(resultList.stream().filter(Objects::nonNull)
.filter(Object[].class::isInstance).map(Object[].class::cast).flatMap(Arrays::stream)
.filter(RegionInformation.class::isInstance).map(RegionInformation.class::cast)
.collect(Collectors.toSet()));
Set<RegionInformation> regionInfoSet =
new LinkedHashSet<>(resultList.stream().filter(Objects::nonNull)
.filter(Object[].class::isInstance).map(Object[].class::cast).flatMap(Arrays::stream)
.filter(RegionInformation.class::isInstance).map(RegionInformation.class::cast)
.collect(Collectors.toSet()));

Set<String> regionNames = new TreeSet<>();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,7 @@ public void addRow(String... values) {
@JsonIgnore
public List<String> getHeaders() {
// this should maintain the original insertion order
List<String> headers = new ArrayList<>();
headers.addAll(table.keySet());
List<String> headers = new ArrayList<>(table.keySet());
return headers;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -999,8 +999,7 @@ public String getEnvAppContextPath() {
}

public Map<String, String> getEnv() {
Map<String, String> map = new TreeMap<>();
map.putAll(env);
Map<String, String> map = new TreeMap<>(env);
return map;
}

Expand Down

0 comments on commit ddd7304

Please sign in to comment.