Skip to content

Commit

Permalink
GEODE-10419: Enhancement of backup disk-store command (apache#7851)
Browse files Browse the repository at this point in the history
* GEODE-10419: initial commit

* GEODE-10419: documentation impacts

* GEODE-10419: added DT
  • Loading branch information
mivanac authored Sep 14, 2022
1 parent 16627d7 commit e4d2f16
Show file tree
Hide file tree
Showing 13 changed files with 399 additions and 14 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.inOrder;
Expand Down Expand Up @@ -68,7 +69,6 @@ public void createCache() throws MalformedObjectNameException {
backupService = mock(BackupService.class);
when(cache.getBackupService()).thenReturn(backupService);
when(cache.getPersistentMemberManager()).thenReturn(memberManager);
when(cache.getBackupService()).thenReturn(backupService);

DLockService dlock = mock(DLockService.class);
when(dlock.lock(any(), anyLong(), anyLong())).thenReturn(true);
Expand Down Expand Up @@ -114,7 +114,7 @@ public void testSuccessfulBackup() throws Exception {

InOrder inOrder = inOrder(dm, backupService);
inOrder.verify(dm).putOutgoing(isA(PrepareBackupRequest.class));
inOrder.verify(backupService).prepareBackup(any(), any());
inOrder.verify(backupService).prepareBackup(any(), any(), eq(null));
inOrder.verify(dm).putOutgoing(isA(FinishBackupRequest.class));
inOrder.verify(backupService).doBackup();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ public class InternalConfigurationPersistenceService implements ConfigurationPer
*/
public static final String CLUSTER_CONFIG_ARTIFACTS_DIR_NAME = "cluster_config";

private static final String CLUSTER_CONFIG_DISK_STORE_NAME = "cluster_config";
public static final String CLUSTER_CONFIG_DISK_STORE_NAME = "cluster_config";

public static final String CLUSTER_CONFIG_DISK_DIR_PREFIX = "ConfigDiskDir_";

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import static org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TIMESTAMP;
import static org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TYPE;
import static org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.BASELINE_DIR;
import static org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.INCLUDE_DISK_STORES;
import static org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.TARGET_DIR;

import java.text.SimpleDateFormat;
Expand All @@ -27,6 +28,7 @@ class BackupConfigFactory {

private String targetDirPath;
private String baselineDirPath;
private String includeDiskStores;

BackupConfigFactory() {
// nothing
Expand All @@ -42,6 +44,11 @@ BackupConfigFactory withBaselineDirPath(String baselineDirPath) {
return this;
}

/**
 * Sets the comma-separated list of disk store names to restrict the backup to.
 *
 * @param includeDiskStores comma-separated disk store names; {@code null} means
 *        back up all disk stores (the INCLUDE_DISK_STORES property is then omitted
 *        from the created backup properties)
 * @return this factory, for call chaining
 */
BackupConfigFactory withIncludeDiskStores(String includeDiskStores) {
this.includeDiskStores = includeDiskStores;
return this;
}

Properties createBackupProperties() {
Properties properties = new Properties();
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
Expand All @@ -51,6 +58,9 @@ Properties createBackupProperties() {
if (baselineDirPath != null) {
properties.setProperty(BASELINE_DIR, baselineDirPath);
}
if (includeDiskStores != null) {
properties.setProperty(INCLUDE_DISK_STORES, includeDiskStores);
}
return properties;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,14 @@ public BackupStatus backupAllMembers(String targetDirPath, String baselineDirPat
return performBackup(properties);
}

/**
 * Backs up all members, optionally restricting the backup to a subset of disk stores.
 *
 * @param targetDirPath directory that receives the backup artifacts
 * @param baselineDirPath baseline backup directory for an incremental backup, or
 *        {@code null} for a full backup
 * @param includeDiskStores comma-separated disk store names to include, or {@code null}
 *        to back up every disk store
 * @return the status of the distributed backup operation
 */
public BackupStatus backupAllMembers(String targetDirPath, String baselineDirPath,
    String includeDiskStores) {
  // Assemble the backup configuration first, then hand it to the common backup path.
  BackupConfigFactory factory = new BackupConfigFactory()
      .withTargetDirPath(targetDirPath)
      .withBaselineDirPath(baselineDirPath)
      .withIncludeDiskStores(includeDiskStores);
  return performBackup(factory.createBackupProperties());
}

private BackupStatus performBackup(Properties properties) throws ManagementException {
if (backupLockService.obtainLock(dm)) {
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,14 @@ public BackupService(InternalCache cache) {

/**
 * Prepares a backup of every disk store on this member.
 *
 * <p>Convenience overload that delegates to the three-argument form with a
 * {@code null} include list, meaning no disk store filtering is applied.
 *
 * @param sender the member that requested the backup
 * @param writer destination writer for the backup artifacts
 * @return the persistent IDs of the disk stores prepared for backup
 * @throws IOException if another backup is already in progress or preparation fails
 * @throws InterruptedException if the calling thread is interrupted while preparing
 */
public HashSet<PersistentID> prepareBackup(InternalDistributedMember sender, BackupWriter writer)
throws IOException, InterruptedException {
return prepareBackup(sender, writer, null);
}

public HashSet<PersistentID> prepareBackup(InternalDistributedMember sender, BackupWriter writer,
String includeDiskStores)
throws IOException, InterruptedException {
validateRequestingSender(sender);
BackupTask backupTask = new BackupTask(cache, writer);
BackupTask backupTask = new BackupTask(cache, writer, includeDiskStores);
if (!currentTask.compareAndSet(null, backupTask)) {
throw new IOException("Another backup is already in progress");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,19 @@
*/
package org.apache.geode.internal.cache.backup;

import static org.apache.geode.distributed.internal.InternalConfigurationPersistenceService.CLUSTER_CONFIG_DISK_STORE_NAME;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;

import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger;

import org.apache.geode.InternalGemFireError;
Expand All @@ -37,7 +43,6 @@
*/
class BackupTask {
private static final Logger logger = LogService.getLogger();

private final Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
private final RestoreScript restoreScript = new RestoreScript();
private final InternalCache cache;
Expand All @@ -46,15 +51,25 @@ class BackupTask {
private final CountDownLatch otherMembersReady = new CountDownLatch(1);
private final HashSet<PersistentID> diskStoresWithData = new HashSet<>();
private final BackupWriter backupWriter;
private final Set<String> includeDiskStoresSet = new HashSet<>();

private volatile boolean isCancelled;

private TemporaryBackupFiles temporaryFiles;
private BackupFileCopier fileCopier;

BackupTask(InternalCache cache, BackupWriter backupWriter) {
/**
 * Creates a backup task for this member's disk stores.
 *
 * @param cache the cache whose disk stores are backed up
 * @param backupWriter destination writer for the backup artifacts
 * @param includeDiskStores comma-separated disk store names to restrict the backup to;
 *        {@code null}, or a value containing only blank entries, means every disk store
 *        is included
 */
BackupTask(InternalCache cache, BackupWriter backupWriter, String includeDiskStores) {
  this.cache = cache;
  this.backupWriter = backupWriter;
  if (includeDiskStores != null) {
    includeDiskStoresSet.addAll(Arrays.stream(includeDiskStores.split(","))
        .map(String::trim) // tolerate whitespace around separators, e.g. "ds1, ds2"
        .filter(StringUtils::isNotBlank)
        .collect(Collectors.toSet()));
    if (!includeDiskStoresSet.isEmpty()) {
      // The shared cluster-configuration disk store must always be part of a backup so
      // the restored system keeps its cluster configuration.
      includeDiskStoresSet.add(CLUSTER_CONFIG_DISK_STORE_NAME);
    }
  }
}

HashSet<PersistentID> getPreparedDiskStores() throws InterruptedException {
Expand Down Expand Up @@ -86,7 +101,9 @@ HashSet<PersistentID> backup() throws InterruptedException, IOException {
private void prepareForBackup() {
for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
DiskStoreImpl storeImpl = (DiskStoreImpl) store;

if (!isDiskStoreIncluded(store)) {
continue;
}
storeImpl.lockStoreBeforeBackup();
if (logger.isDebugEnabled()) {
logger.debug("Acquired lock for backup on disk store {}", store.getName());
Expand Down Expand Up @@ -145,6 +162,9 @@ private Map<DiskStoreImpl, DiskStoreBackup> startDiskStoreBackups(
Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();

for (DiskStore store : diskStores) {
if (!isDiskStoreIncluded(store)) {
continue;
}
DiskStoreImpl diskStore = (DiskStoreImpl) store;
try {
if (diskStore.hasPersistedData()) {
Expand All @@ -161,6 +181,16 @@ private Map<DiskStoreImpl, DiskStoreBackup> startDiskStoreBackups(
return backupByDiskStore;
}

/**
 * Returns whether the given disk store should be part of this backup.
 *
 * <p>An empty include set means no filtering was requested, so every disk store
 * is included; otherwise only disk stores whose name is in the set are included.
 *
 * @param store the disk store to check
 * @return {@code true} if the disk store should be backed up
 */
boolean isDiskStoreIncluded(DiskStore store) {
  return includeDiskStoresSet.isEmpty() || includeDiskStoresSet.contains(store.getName());
}

void abort() {
isCancelled = true;
otherMembersReady.countDown();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ class FileSystemBackupWriterConfig extends AbstractBackupWriterConfig {

static final String TARGET_DIR = "TARGET_DIRECTORY";
static final String BASELINE_DIR = "BASELINE_DIRECTORY";
static final String INCLUDE_DISK_STORES = "INCLUDE_DISK_STORES";

FileSystemBackupWriterConfig(Properties properties) {
super(properties);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,23 @@ class PrepareBackup {
private final InternalDistributedMember member;
private final InternalCache cache;
private final BackupWriter backupWriter;
private final String includeDiskStores;

PrepareBackup(InternalDistributedMember member, InternalCache cache, BackupWriter backupWriter) {
/**
 * Creates the prepare-backup step for a single member.
 *
 * @param member the member on whose behalf the backup is prepared
 * @param cache this member's cache; may be null, in which case {@code run()}
 *        prepares nothing
 * @param backupWriter destination writer for the backup artifacts
 * @param includeDiskStores comma-separated disk store names to restrict the backup to,
 *        or {@code null} for no filtering
 */
PrepareBackup(InternalDistributedMember member, InternalCache cache, BackupWriter backupWriter,
String includeDiskStores) {
this.member = member;
this.cache = cache;
this.backupWriter = backupWriter;
this.includeDiskStores = includeDiskStores;
}

/**
 * Runs the prepare-backup step on this member.
 *
 * <p>Note: the scraped diff left the pre-change two-argument {@code prepareBackup}
 * call in place alongside its replacement; this version keeps only the
 * post-commit three-argument call.
 *
 * @return the persistent IDs of the disk stores prepared for backup; empty when this
 *         member has no cache
 * @throws IOException if backup preparation fails
 * @throws InterruptedException if the calling thread is interrupted while preparing
 */
HashSet<PersistentID> run() throws IOException, InterruptedException {
  if (cache == null) {
    // No cache on this member: nothing to prepare.
    return new HashSet<>();
  }
  return cache.getBackupService().prepareBackup(member, backupWriter, includeDiskStores);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package org.apache.geode.internal.cache.backup;

import static org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TYPE;
import static org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.INCLUDE_DISK_STORES;

import java.util.HashSet;
import java.util.Properties;
Expand Down Expand Up @@ -42,7 +43,9 @@ PrepareBackup createPrepareBackup(InternalDistributedMember member, InternalCach
String memberId = cleanSpecialCharacters(member.toString());
BackupWriter backupWriter = BackupWriterFactory.getFactoryForType(properties.getProperty(TYPE))
.createWriter(properties, memberId);
return new PrepareBackup(member, cache, backupWriter);

String includeDiskStores = properties.getProperty(INCLUDE_DISK_STORES);
return new PrepareBackup(member, cache, backupWriter, includeDiskStores);
}

BackupResponse createBackupResponse(InternalDistributedMember sender,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -531,6 +531,14 @@ public class CliStrings {
public static final String BACKUP_DISK_STORE_MSG_NO_DISKSTORES_BACKED_UP =
"No disk store(s) were backed up.";

public static final String BACKUP_INCLUDE_DISK_STORES = "include-disk-stores";
public static final String BACKUP_INCLUDE_DISK_STORES__HELP = "List of disk-stores to include.";
public static final String BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_0 =
"Specify valid include-disk-stores. Unknown Disk Store : \"{0}\".";

public static final String BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_1 =
"Specify valid include-disk-stores. Blank name added in list of disk-stores";

/* 'compact disk-store' command */
public static final String COMPACT_DISK_STORE = "compact disk-store";
public static final String COMPACT_DISK_STORE__HELP =
Expand Down
10 changes: 9 additions & 1 deletion geode-docs/tools_modules/gfsh/command-pages/backup.html.md.erb
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ You can also use this command to perform an incremental backup. See [Creating Ba
**Syntax:**

``` pre
backup disk-store --dir=value [--baseline-dir=value]
backup disk-store --dir=value [--baseline-dir=value] [--include-disk-stores=value(,value)*]
```

<a id="topic_E74ED23CB60342538B2175C326E7D758__table_2277A2CE8F6E4731B45FEFA2B1366DB6"></a>
Expand All @@ -59,6 +59,11 @@ backup disk-store --dir=value [--baseline-dir=value]
<td>Directory that contains the baseline backup used for comparison during an incremental backup.
<p>An incremental backup operation backs up any data that is not present in the directory specified in <span class="keyword parmname">&#8209;&#8209;baseline-dir</span>. If the member cannot find previously backed up data or if the previously backed up data is corrupt, the command performs a full backup on that member.</p></td>
</tr>
<tr>
<td><span class="keyword parmname">&#8209;&#8209;include-disk-stores</span></td>
<td>List of disk-stores to include in backup.
<p>Performs a selective backup that includes only the disk-stores listed in <span class="keyword parmname">&#8209;&#8209;include-disk-stores</span>. Each specified disk-store must exist in the system. If this parameter is not specified, all disk-stores are included in the backup.</p></td>
</tr>
</tbody>
</table>

Expand All @@ -70,6 +75,9 @@ backup disk-store --dir=value [--baseline-dir=value]
backup disk-store --dir=data/backups

backup disk-store --dir=data/backup/disk-store --baseline-dir=data/backups/2012-09-24-17-08-50

backup disk-store --dir=data/backups --include-disk-stores=data

```

**Sample Output:**
Expand Down
Loading

0 comments on commit e4d2f16

Please sign in to comment.