diff --git a/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
index 3e3ab70800fd..78a9a89eb045 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
@@ -294,6 +294,8 @@ public void testAckSevereAlertThreshold() {
     Region region = regionFactory.create("testRegion");
     addIgnoredException("elapsed while waiting for replies");
+    // Ignore logging from Connection.doSevereAlertProcessing()
+    addIgnoredException("seconds have elapsed waiting for a response from");
 
     vm1.invoke("Connect to distributed system", () -> {
       config.setProperty(NAME, "sleeper");
       getSystem(config);
@@ -345,6 +347,8 @@ public void testKickOutSickMember() {
     Region region = regionFactory.create("testRegion");
     addIgnoredException("sec have elapsed while waiting for replies");
+    // Ignore logging from Connection.doSevereAlertProcessing()
+    addIgnoredException("seconds have elapsed waiting for a response from");
 
     vm1.invoke(new SerializableRunnable("Connect to distributed system") {
       @Override
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
index 78a51532254f..cdb54323996a 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.tcp;
 
+import static org.apache.geode.distributed.ConfigurationProperties.CONSERVE_SOCKETS;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
 import static org.apache.geode.test.dunit.VM.getController;
@@ -40,14 +41,19 @@
 import org.apache.geode.test.dunit.rules.DistributedRule;
 
 public class CloseConnectionTest implements Serializable {
+  private static final long serialVersionUID = 3692493564204797623L;
 
   private VM vm0;
   private VM vm1;
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule();
 
+  /**
+   * The test case in this class requires that conserve-sockets=true in order for the connections to
+   * be shared.
+   */
   @Rule
-  public CacheRule cacheRule = new CacheRule();
+  public CacheRule cacheRule = new CacheRule.Builder().addConfig(CONSERVE_SOCKETS, "true").build();
 
   @Before
   public void setUp() {
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java b/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
index 22a8fbc3fac0..1955210e2dac 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
@@ -338,7 +338,7 @@ public interface ConfigurationProperties {
    * "false" then every application thread that sends distribution messages to other members of the
    * distributed system will own its own sockets and have exclusive access to them. The length of
    * time a thread can have exclusive access to a socket can be configured with "socket-lease-time".
-   * Default: "true"
+   * Default: "false"
    *
    * Allowed values: true|false
    *
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
index 9aad59232672..6f6f5233f268 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
@@ -1911,7 +1911,7 @@ static InetAddress _getDefaultMcastAddress() {
   /**
    * The default value of the {@link ConfigurationProperties#CONSERVE_SOCKETS} property
    */
-  boolean DEFAULT_CONSERVE_SOCKETS = true;
+  boolean DEFAULT_CONSERVE_SOCKETS = false;
 
   /**
    * Returns the value of the {@link ConfigurationProperties#ROLES} property
diff --git a/geode-docs/managing/logging/logging_categories.html.md.erb b/geode-docs/managing/logging/logging_categories.html.md.erb
index 9ed450e9deba..66b0854cc080 100644
--- a/geode-docs/managing/logging/logging_categories.html.md.erb
+++ b/geode-docs/managing/logging/logging_categories.html.md.erb
@@ -211,7 +211,7 @@ These are the levels, in descending order, with sample output:
 bind-address=""
 cache-xml-file="cache.xml"
 conflate-events="server"
-conserve-sockets="true"
+conserve-sockets="false"
 ...
 socket-buffer-size="32768"
 socket-lease-time="60000"
diff --git a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
index 1c52ed5dbf95..644c0f322389 100644
--- a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
@@ -30,7 +30,7 @@ You can configure socket sharing for peer-to-peer:
 - **Peer-to-peer**. You can configure whether your members share sockets both at the application
 level and at the thread level. To enable sharing at the application level, set the
 `gemfire.properties` property `conserve-sockets` to `true`. To achieve maximum throughput, however, we
-recommend that you set `conserve-sockets` to `false`.
+recommend that you use the default value of `false`.
 At the thread level, developers can override this setting by using the DistributedSystem API method `setThreadsSocketPolicy`.
 You might want to enable socket sharing at the application level and then have threads that do a lot of cache work take sole ownership of their sockets.
 Make sure to program these threads to release their sockets as soon as possible using the `releaseThreadsSockets` method, rather than waiting for a timeout or thread death.
diff --git a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
index 8e1e5876698d..b21594d1e0f0 100644
--- a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
+++ b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
@@ -73,7 +73,7 @@ If possible, your TCP/IP buffer size settings should match across your installat
 ```
 
 **Note:**
-WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for GemFire members that participate in a WAN deployment.
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment.
 
 ## Multi-site (WAN) Socket Requirements
diff --git a/geode-docs/reference/topics/gemfire_properties.html.md.erb b/geode-docs/reference/topics/gemfire_properties.html.md.erb
index 8de4d71b281a..3386423ce0de 100644
--- a/geode-docs/reference/topics/gemfire_properties.html.md.erb
+++ b/geode-docs/reference/topics/gemfire_properties.html.md.erb
@@ -129,9 +129,9 @@ Valid values are in the range 0...2147483647
 conserve-sockets
-Specifies whether sockets are shared by the system member’s threads. If true, threads share, and a minimum number of sockets are used to connect to the cluster. If false, every application thread has its own sockets for distribution purposes. You can override this setting for individual threads inside your application. Where possible, it is better to set conserve-sockets to true and enable the use of specific extra sockets in the application code if needed. WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set conserve-sockets=false for <%=vars.product_name%> members that participate in a WAN deployment.
+Specifies whether sockets are shared by the system member’s threads. If true, threads share, and a minimum number of sockets are used to connect to the cluster. If false, every application thread has its own sockets for distribution purposes. You can override this setting for individual threads inside your application. WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of conserve-sockets=false for <%=vars.product_name%> members that participate in a WAN deployment.
 S, L
-true
+false
 
 delta-propagation
diff --git a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
index ba446766a719..bf6dfd276d3b 100644
--- a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
+++ b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
@@ -224,16 +224,16 @@ A note of caution-- if the domain object contains many domain objects as member
 Servers always maintain two outgoing connections to each of their peers. So for each peer a server has, there are four total connections: two going out to the peer and two coming in from the peer.
 
-The server threads that service client requests also communicate with peers to distribute events and forward client requests. If the server's <%=vars.product_name%> connection property *conserve-sockets* is set to true (the default), these threads use the already-established peer connections for this communication.
+The server threads that service client requests also communicate with peers to distribute events and forward client requests. If the server's <%=vars.product_name%> connection property *conserve-sockets* is set to true, these threads use the already-established peer connections for this communication.
 
-If *conserve-sockets* is false, each thread that services clients establishes two of its own individual connections to its server peers, one to send, and one to receive. Each socket uses a file descriptor, so the number of available sockets is governed by two operating system settings:
+If *conserve-sockets* is false (the default), each thread that services clients establishes two of its own individual connections to its server peers, one to send, and one to receive. Each socket uses a file descriptor, so the number of available sockets is governed by two operating system settings:
 
 - maximum open files allowed on the system as a whole
 - maximum open files allowed for each session
 
 In servers with many threads servicing clients, if *conserve-sockets* is set to false, the demand for connections can easily overrun the number of available sockets. Even with *conserve-sockets* set to false, you can cap the number of these connections by setting the server's *max-threads* parameter.
 
-Since each client connection takes one server socket on a thread to handle the connection, and since that server acts as a proxy on partitioned regions to get results, or execute the function service on behalf of the client, for partitioned regions, if conserve sockets is set to false, this also results in a new socket on the server being opened to each peer. Thus N sockets are opened, where N is the number of peers. Large number of clients simultaneously connecting to a large set of peers with a partitioned region with conserve sockets set to false can cause a huge amount of memory to be consumed by socket. Set conserve-sockets to true in these instances.
+Since each client connection takes one server socket on a thread to handle the connection, and since that server acts as a proxy on partitioned regions to get results, or execute the function service on behalf of the client, for partitioned regions, if conserve-sockets is set to false, this also results in a new socket on the server being opened to each peer. Thus N sockets are opened, where N is the number of peers. A large number of clients simultaneously connecting to a large set of peers with a partitioned region with conserve-sockets set to false can cause a large amount of memory to be consumed by sockets.
 
 **Note:**
 There is also JVM overhead for the thread stack for each client connection being processed, set at 256KB or 512KB for most JVMs . On some JVMs you can reduce it to 128KB. You can use the <%=vars.product_name%> `max-threads` property or the <%=vars.product_name%> `max-connections` property to limit the number of client threads and thus both thread overhead and socket overhead.
@@ -262,15 +262,15 @@ The following table lists the memory requirements based on connections.
 = (lesser of max-threads property on server or max-connections)* (socket buffer size +thread overhead for the JVM )
-Per member of the cluster if conserve sockets is set to true
+Per member of the cluster if conserve-sockets is set to true
 4* number of peers
-Per member, if conserve sockets is set to false
+Per member, if conserve-sockets is set to false
 4 * number of peers hosting that region* number of threads
-If member hosts a Partitioned Region, If conserve sockets set to false and it is a Server (this is cumulative with the above)
+If member hosts a Partitioned Region, if conserve-sockets is set to false and it is a Server (this is cumulative with the above)
 
 =< max-threads * 2 * number of peers
 
 Note:
diff --git a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
index d3bfa4a2c409..d7ba5e4ce02e 100644
--- a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
+++ b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
@@ -25,7 +25,7 @@ Plan and configure your multi-site topology, and configure the regions that will
 
 Before you start, you should understand how to configure membership and communication in peer-to-peer systems using locators. See [Configuring Peer-to-Peer Discovery](../p2p_configuration/setting_up_a_p2p_system.html) and [Configuring Peer Communication](../p2p_configuration/setting_up_peer_communication.html).
 
-WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
 
 ## Main Steps