GEODE-9562: add system property for redis region name (apache#6960)
Adds a system property (geode-for-redis-region-name) that can be set to
any non-null, non-empty string. By default, the data region for Geode for
Redis is named GEODE_FOR_REDIS. If the system property value is not valid,
the default is silently used instead. The region name is used in the
RegionProvider and in authentication. Both the geode. and gemfire. prefixes
are supported for the system property name (see the example below).

Other changes:
- remove comments that say Geode for Redis is experimental (it isn't)
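
As an illustration (not part of the commit itself), the property can be set before the cache and GeodeRedisServer are created, mirroring the setupCacheWithRegionName helper added to AuthIntegrationTest below. The region name, port, and any import paths not visible in this diff are assumptions:

import org.apache.geode.cache.CacheFactory;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.redis.internal.GeodeRedisServer;

public class RedisRegionNameExample {
  public static void main(String[] args) {
    // Either prefix is honored; "MY_REDIS_DATA" is an illustrative, non-default name.
    System.setProperty("geode.geode-for-redis-region-name", "MY_REDIS_DATA");

    InternalCache cache = (InternalCache) new CacheFactory()
        .set("mcast-port", "0")
        .set("locators", "")
        .create();

    // The RegionProvider creates its partitioned data region under the configured
    // name instead of the default GEODE_FOR_REDIS.
    GeodeRedisServer server = new GeodeRedisServer("localhost", 6379, cache);
  }
}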
nonbinaryprogrammer authored Oct 25, 2021
1 parent 2770669 commit f5ccf3b
Showing 20 changed files with 167 additions and 56 deletions.
ci/scripts/execute_redis_tests.sh (4 changes: 2 additions & 2 deletions)
@@ -44,7 +44,7 @@ export JAVA_HOME=${JAVA_TEST_PATH}

# This will cause all buckets to be created
../geode-assembly/build/install/apache-geode/bin/gfsh -e "connect --jmx-manager=localhost[1099]" \
-e "query --query='select count(*) from /REDIS_DATA'"
-e "query --query='select count(*) from /GEODE_FOR_REDIS'"

failCount=0

@@ -66,7 +66,7 @@ failCount=0

# This will cause all buckets to be created
../geode-assembly/build/install/apache-geode/bin/gfsh -e "connect --jmx-manager=localhost[1099]" \
-e "query --query='select count(*) from /REDIS_DATA'"
-e "query --query='select count(*) from /GEODE_FOR_REDIS'"

./runtest --host 127.0.0.1 --port 6379 \
--single unit/type/set \
@@ -1929,10 +1929,6 @@ public interface ConfigurationProperties {
* all local addresses.
* </p>
* <U>Default</U>: ""
*
* <p>
* Experimental: Geode API compatible with Redis is subject to change in future releases
* <p/>
*/

String REDIS_BIND_ADDRESS = "geode-for-redis-bind-address";
@@ -1948,10 +1944,6 @@ public interface ConfigurationProperties {
* When the default value of false, Geode for Redis is not available.
* Set to true to enable Geode for Redis.
* </p>
*
* <p>
* Experimental: Geode API compatible with Redis is subject to change in future releases
* <p/>
*/
String REDIS_ENABLED = "geode-for-redis-enabled";
/**
@@ -1962,10 +1954,6 @@ public interface ConfigurationProperties {
* to authenticate using only a password. This requires a SecurityManager to be configured.
* </p>
* <U>Default</U>: default
*
* <p>
* Experimental: Geode API compatible with Redis is subject to change in future releases
* <p/>
*/
String REDIS_USERNAME = "geode-for-redis-username";
/**
@@ -1978,10 +1966,6 @@ public interface ConfigurationProperties {
* <U>Default</U>: 6379
* </p>
* <U>Allowed values</U>: 0..65535
*
* <p>
* Experimental: Geode API compatible with Redis is subject to change in future releases
* <p/>
*/
String REDIS_PORT = "geode-for-redis-port";
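
For context, a minimal sketch (assumed usage, not taken from this commit) of enabling Geode for Redis on an embedded cache with the properties documented above; the values shown simply restate the documented defaults:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.distributed.ConfigurationProperties;

public class EnableGeodeForRedisExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory()
        // Defaults to false; Geode for Redis is unavailable until this is true.
        .set(ConfigurationProperties.REDIS_ENABLED, "true")
        // Defaults to 6379; allowed values are 0..65535.
        .set(ConfigurationProperties.REDIS_PORT, "6379")
        // Defaults to "", which binds to all local addresses.
        .set(ConfigurationProperties.REDIS_BIND_ADDRESS, "")
        .create();
  }
}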
/**
@@ -52,6 +52,11 @@ public String getPassword() {
return "default";
}

@Override
protected void setupCacheWithSecurityAndRegionName(String regionName) {
setupCacheWithSecurity();
}

@Override
public void setupCacheWithSecurity() {
redisContainer =
@@ -177,7 +177,7 @@ public DistributedMember moveBucketForKey(String key, String targetServerName) {
return getMember(1).invoke("moveBucketForKey: " + key + " -> " + targetServerName,
() -> {
Region<RedisKey, RedisData> r = RedisClusterStartupRule.getCache()
.getRegion(RegionProvider.REDIS_DATA_REGION);
.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);

RedisKey redisKey = new RedisKey(key.getBytes());
DistributedMember primaryMember =
@@ -222,7 +222,7 @@ public DistributedMember moveBucketForKey(String key, String targetServerName) {
public String getKeyOnServer(String keyPrefix, int vmId) {
return getMember(1).invoke("getKeyOnServer", () -> {
Region<RedisKey, RedisData> r = RedisClusterStartupRule.getCache()
.getRegion(RegionProvider.REDIS_DATA_REGION);
.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);

String server = "server-" + vmId;
String key;
@@ -112,7 +112,7 @@ private void validateBucketMapping(Map<String, Integer> bucketMap) {
private Map<String, Integer> getKeyToBucketMap(MemberVM vm) {
return vm.invoke((SerializableCallableIF<Map<String, Integer>>) () -> {
Region<RedisKey, RedisData> region =
RedisClusterStartupRule.getCache().getRegion(RegionProvider.REDIS_DATA_REGION);
RedisClusterStartupRule.getCache().getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);

LocalDataSet local = (LocalDataSet) PartitionRegionHelper.getLocalPrimaryData(region);
Map<String, Integer> keyMap = new HashMap<>();
@@ -165,7 +165,7 @@ private void compareBuckets() {
server1.invoke(() -> {
InternalCache cache = ClusterStartupRule.getCache();
PartitionedRegion region =
(PartitionedRegion) cache.getRegion(RegionProvider.REDIS_DATA_REGION);
(PartitionedRegion) cache.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
for (int j = 0; j < region.getTotalNumberOfBuckets(); j++) {
List<BucketDump> buckets = region.getAllBucketEntries(j);
assertThat(buckets.size()).isEqualTo(2);
@@ -202,7 +202,7 @@ private static int getBucketId(Object key) {
}

private static Region<RedisKey, RedisData> getDataRegion() {
return ClusterStartupRule.getCache().getRegion(RegionProvider.REDIS_DATA_REGION);
return ClusterStartupRule.getCache().getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
}

private static boolean isPrimaryForBucket(int bucketId) {
@@ -230,7 +230,7 @@ private static int getBucketId(Object key) {
}

private static Region<RedisKey, RedisData> getDataRegion() {
return ClusterStartupRule.getCache().getRegion(RegionProvider.REDIS_DATA_REGION);
return ClusterStartupRule.getCache().getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
}

private static boolean isPrimaryForBucket(int bucketId) {
@@ -195,7 +195,7 @@ private static int getBucketId(Object key) {
}

private static Region<RedisKey, RedisData> getDataRegion() {
return ClusterStartupRule.getCache().getRegion(RegionProvider.REDIS_DATA_REGION);
return ClusterStartupRule.getCache().getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
}

private static boolean isPrimaryForBucket(int bucketId) {
@@ -229,7 +229,7 @@ private void stopNodeWithPrimaryBucketOfTheKey(boolean isCrash) {

private static boolean isPrimaryForKey() {
PartitionedRegion region = (PartitionedRegion) ClusterStartupRule.getCache()
.getRegion(RegionProvider.REDIS_DATA_REGION);
.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
int bucketId = PartitionedRegionHelper.getHashKey(region, Operation.GET,
new RedisKey(KEY.getBytes()), null, null);
return region.getLocalPrimaryBucketsListTestOnly().contains(bucketId);
@@ -215,7 +215,7 @@ private void removeAllButFirstEntry() {

private static boolean isPrimaryForKey() {
PartitionedRegion region = (PartitionedRegion) requireNonNull(ClusterStartupRule.getCache())
.getRegion(RegionProvider.REDIS_DATA_REGION);
.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
int bucketId = PartitionedRegionHelper
.getHashKey(region, Operation.GET, new RedisKey(KEY.getBytes()), null, null);
return region.getLocalPrimaryBucketsListTestOnly().contains(bucketId);
@@ -135,7 +135,7 @@ private void compareMaxInactiveIntervals() {
cluster.getVM(1).invoke(() -> {
InternalCache cache = ClusterStartupRule.getCache();
PartitionedRegion region =
(PartitionedRegion) cache.getRegion(RegionProvider.REDIS_DATA_REGION);
(PartitionedRegion) cache.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
for (int j = 0; j < region.getTotalNumberOfBuckets(); j++) {
List<BucketDump> buckets = region.getAllBucketEntries(j);
if (buckets.isEmpty()) {
@@ -48,6 +48,8 @@ public abstract class AbstractAuthIntegrationTest {

Jedis jedis;

protected abstract void setupCacheWithSecurityAndRegionName(String regionName) throws Exception;

protected abstract void setupCacheWithSecurity() throws Exception;

protected abstract void setupCacheWithoutSecurity() throws Exception;
@@ -148,6 +150,14 @@ public void givenNoSecurity_accessWithoutAuth_passes() throws Exception {
assertThat(jedis.ping()).isEqualTo("PONG");
}

@Test
public void givenSecurityAndNonDefaultRegionName_authPasses() throws Exception {
setupCacheWithSecurityAndRegionName("nonDefault");

assertThat(jedis.auth(getPassword())).isEqualTo("OK");
assertThat(jedis.ping()).isEqualTo("PONG");
}

@Test
public void givenSecurity_largeMultiBulkRequestsFail_whenNotAuthenticated() throws Exception {
setupCacheWithSecurity();
@@ -19,12 +19,15 @@
import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
import static org.apache.geode.redis.internal.RedisProperties.REDIS_REGION_NAME_PROPERTY;
import static org.apache.geode.test.dunit.rules.RedisClusterStartupRule.REDIS_CLIENT_TIMEOUT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.RestoreSystemProperties;
import redis.clients.jedis.Jedis;

import org.apache.geode.cache.CacheFactory;
@@ -43,6 +46,9 @@ public class AuthIntegrationTest extends AbstractAuthIntegrationTest {
private GemFireCache cache;
private int port;

@Rule
public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();

@After
public void tearDown() {
server.shutdown();
@@ -64,6 +70,11 @@ public String getPassword() {
return "dataWrite";
}

@Override
protected void setupCacheWithSecurityAndRegionName(String regionName) throws Exception {
setupCacheWithRegionName(getUsername(), regionName, true);
}

public void setupCacheWithSecurity() throws Exception {
setupCache(getUsername(), true);
}
@@ -78,25 +89,27 @@ private void setupCache(String username, boolean withSecurityManager) throws Exception {
* setting this value.
*/
System.setProperty("io.netty.eventLoopThreads", "1");
try {
port = AvailablePortHelper.getRandomAvailableTCPPort();
CacheFactory cf = new CacheFactory();
cf.set(LOG_LEVEL, "error");
cf.set(MCAST_PORT, "0");
cf.set(LOCATORS, "");
if (username != null) {
cf.set(ConfigurationProperties.REDIS_USERNAME, username);
}
if (withSecurityManager) {
cf.set(ConfigurationProperties.SECURITY_MANAGER, SimpleSecurityManager.class.getName());
}
cache = cf.create();
server = new GeodeRedisServer("localhost", port, (InternalCache) cache);
server.getRegionProvider().getSlotAdvisor().getBucketSlots();
this.jedis = new Jedis("localhost", port, 100000);
} finally {
System.clearProperty("io.netty.eventLoopThreads");
port = AvailablePortHelper.getRandomAvailableTCPPort();
CacheFactory cf = new CacheFactory();
cf.set(LOG_LEVEL, "error");
cf.set(MCAST_PORT, "0");
cf.set(LOCATORS, "");
if (username != null) {
cf.set(ConfigurationProperties.REDIS_USERNAME, username);
}
if (withSecurityManager) {
cf.set(ConfigurationProperties.SECURITY_MANAGER, SimpleSecurityManager.class.getName());
}
cache = cf.create();
server = new GeodeRedisServer("localhost", port, (InternalCache) cache);
server.getRegionProvider().getSlotAdvisor().getBucketSlots();
this.jedis = new Jedis("localhost", port, 100000);
}

private void setupCacheWithRegionName(String username, String regionName,
boolean withSecurityManager) throws Exception {
System.setProperty(REDIS_REGION_NAME_PROPERTY, regionName);
setupCache(username, withSecurityManager);
}

@Test
@@ -72,7 +72,7 @@ EnumMap<Measurement, Integer> expectedPerEntryOverhead() {
public void printHistogramOfOneRedisKey() throws IllegalAccessException {
final PartitionedRegion dataRegion =
(PartitionedRegion) CacheFactory.getAnyInstance()
.getRegion(RegionProvider.REDIS_DATA_REGION);
.getRegion(RegionProvider.DEFAULT_REDIS_REGION_NAME);
final Object redisKey = dataRegion.keys().iterator().next();
BucketRegion bucket = dataRegion.getBucketRegion(redisKey);
RegionEntry entry = bucket.entries.getEntry(redisKey);
@@ -15,13 +15,27 @@

package org.apache.geode.redis.internal;

import org.apache.commons.lang3.StringUtils;

public class RedisProperties {
/** System Property Names **/
public static final String REDIS_REGION_NAME_PROPERTY = "geode-for-redis-region-name";
public static final String WRITE_TIMEOUT_SECONDS = "geode-for-redis-write-timeout-seconds";
public static final String EXPIRATION_INTERVAL_SECONDS =
"geode-for-redis-expiration-interval-seconds";

public static String getStringSystemProperty(String propName, String defaultValue) {
String geodeValue = System.getProperty("geode." + propName, defaultValue);
String gemfireValue = System.getProperty("gemfire." + propName, defaultValue);

if (StringUtils.isNotEmpty(geodeValue) && !geodeValue.equals(defaultValue)) {
return geodeValue;
} else if (StringUtils.isNotEmpty(gemfireValue) && !gemfireValue.equals(defaultValue)) {
return gemfireValue;
} else {
return defaultValue;
}
}

/** assumes that default is greater than or equal to minValue **/
public static int getIntegerSystemProperty(String propName, int defaultValue, int minValue) {
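
To make the prefix precedence concrete, a small sketch (not part of the commit) of how getStringSystemProperty is expected to resolve the region-name property; the region names are illustrative:

import static org.apache.geode.redis.internal.RedisProperties.REDIS_REGION_NAME_PROPERTY;
import static org.apache.geode.redis.internal.RedisProperties.getStringSystemProperty;

public class PrefixResolutionSketch {
  public static void main(String[] args) {
    String defaultName = "GEODE_FOR_REDIS";

    // Only the geode.-prefixed property is set, so its value is returned.
    System.setProperty("geode." + REDIS_REGION_NAME_PROPERTY, "REGION_A");
    System.out.println(getStringSystemProperty(REDIS_REGION_NAME_PROPERTY, defaultName)); // REGION_A

    // Both prefixes are set: the geode.-prefixed value wins.
    System.setProperty("gemfire." + REDIS_REGION_NAME_PROPERTY, "REGION_B");
    System.out.println(getStringSystemProperty(REDIS_REGION_NAME_PROPERTY, defaultName)); // REGION_A

    // An empty value is not valid, so the default is silently used.
    System.setProperty("geode." + REDIS_REGION_NAME_PROPERTY, "");
    System.clearProperty("gemfire." + REDIS_REGION_NAME_PROPERTY);
    System.out.println(getStringSystemProperty(REDIS_REGION_NAME_PROPERTY, defaultName)); // GEODE_FOR_REDIS
  }
}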
@@ -14,6 +14,8 @@
*/
package org.apache.geode.redis.internal;

import static org.apache.geode.redis.internal.RedisProperties.REDIS_REGION_NAME_PROPERTY;
import static org.apache.geode.redis.internal.RedisProperties.getStringSystemProperty;
import static org.apache.geode.redis.internal.data.NullRedisDataStructures.NULL_REDIS_STRING;
import static org.apache.geode.redis.internal.data.RedisDataType.REDIS_DATA;
import static org.apache.geode.redis.internal.data.RedisDataType.REDIS_STRING;
@@ -55,7 +57,7 @@ public class RegionProvider {
/**
* The name of the region that holds data stored in redis.
*/
public static final String REDIS_DATA_REGION = "REDIS_DATA";
public static final String DEFAULT_REDIS_REGION_NAME = "GEODE_FOR_REDIS";
public static final String REDIS_REGION_BUCKETS_PARAM = "redis.region.buckets";

// Ideally the bucket count should be a power of 2, but technically it is not required.
@@ -72,6 +74,7 @@ public class RegionProvider {
private final StripedCoordinator stripedCoordinator;
private final RedisStats redisStats;
private final CacheTransactionManager txManager;
private final String redisRegionName;

public RegionProvider(InternalCache cache, StripedCoordinator stripedCoordinator,
RedisStats redisStats) {
@@ -89,7 +92,11 @@ public RegionProvider(InternalCache cache, StripedCoordinator stripedCoordinator
attributesFactory.setTotalNumBuckets(REDIS_REGION_BUCKETS);
redisDataRegionFactory.setPartitionAttributes(attributesFactory.create());

dataRegion = redisDataRegionFactory.create(REDIS_DATA_REGION);
redisRegionName =
getStringSystemProperty(REDIS_REGION_NAME_PROPERTY, DEFAULT_REDIS_REGION_NAME);

dataRegion = redisDataRegionFactory.create(redisRegionName);

partitionedRegion = (PartitionedRegion) dataRegion;

txManager = cache.getCacheTransactionManager();
@@ -113,6 +120,10 @@ public RedisStats getRedisStats() {
return redisStats;
}

public String getRedisRegionName() {
return redisRegionName;
}

public <T> T lockedExecute(RedisKey key, Callable<T> callable) {
try {
return partitionedRegion.computeWithPrimaryLocked(key,
@@ -16,6 +16,9 @@
package org.apache.geode.redis.internal.netty;

import static org.apache.geode.redis.internal.RedisConstants.ERROR_NOT_AUTHORIZED;
import static org.apache.geode.redis.internal.RedisProperties.REDIS_REGION_NAME_PROPERTY;
import static org.apache.geode.redis.internal.RedisProperties.getStringSystemProperty;
import static org.apache.geode.redis.internal.RegionProvider.DEFAULT_REDIS_REGION_NAME;

import java.io.IOException;
import java.math.BigInteger;
@@ -96,14 +99,23 @@ public class ExecutionHandlerContext extends ChannelInboundHandlerAdapter {
private Subject subject;

static {
String resourcePermission = System.getProperty("redis.resource-permission",
"DATA:WRITE:" + RegionProvider.REDIS_DATA_REGION);
ResourcePermission tempPermission;
String regionName =
getStringSystemProperty(REDIS_REGION_NAME_PROPERTY, DEFAULT_REDIS_REGION_NAME);
String resourcePermission = System.getProperty("redis.resource-permission", "DATA:WRITE:"
+ regionName);
String[] parts = resourcePermission.split(":");
if (parts.length != 3) {
parts = new String[] {"DATA", "WRITE", RegionProvider.REDIS_DATA_REGION};

try {
if (parts.length != 3) {
throw new IllegalArgumentException();
}
tempPermission = new ResourcePermission(parts[0], parts[1], parts[2]);
} catch (IllegalArgumentException e) {
tempPermission = new ResourcePermission("DATA", "WRITE", DEFAULT_REDIS_REGION_NAME);
}

RESOURCE_PERMISSION = new ResourcePermission(parts[0], parts[1], parts[2]);
RESOURCE_PERMISSION = tempPermission;
}
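
A short sketch (not from the commit) of how the redis.resource-permission override handled by the static block above behaves; the region name is illustrative, and the property has to be set before this class is loaded, typically as a -D flag on the server JVM:

public class ResourcePermissionOverrideSketch {
  public static void main(String[] args) {
    // Well-formed RESOURCE:OPERATION:TARGET value: the checked permission
    // becomes DATA:WRITE:MY_REDIS_DATA.
    System.setProperty("redis.resource-permission", "DATA:WRITE:MY_REDIS_DATA");

    // A value without exactly three ':'-separated parts is rejected and the
    // fallback permission DATA:WRITE:GEODE_FOR_REDIS is used instead.
    System.setProperty("redis.resource-permission", "DATA:WRITE");
  }
}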

/**