Don't consider deleted schema when checking compatibility (apache#4669)
feca5bb changed the topic delete logic to delete the schema when the
topic is deleted (though this only seems to be enabled for idle topic
GC). This exposed a bug in compatibility checking: if a subscription
tries to attach to the topic, even with the same schema that was used
previously, a compatibility exception is thrown.

This is because the topic still appears to have a schema, even though
there is no actual schema data, just a tombstone. I've changed the logic
to return no schema if the schema read back is a tombstone.
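
The registry change in the diff below implements this. As a standalone illustration, here is a minimal sketch of the pattern: a tombstone read back from storage is reported as null, the same as having no schema at all. The Entry class is a hypothetical stand-in used only for this example, not the broker's SchemaAndMetadata.

    import java.util.concurrent.CompletableFuture;

    public class TombstoneFilterSketch {
        // Toy stand-in: only the deleted flag matters for this illustration.
        static class Entry {
            final boolean deleted;
            Entry(boolean deleted) { this.deleted = deleted; }
            boolean isDeleted() { return deleted; }
        }

        // Mirrors the shape of the fix: a tombstone entry resolves to null,
        // i.e. callers see "no schema" rather than the deletion marker.
        static CompletableFuture<Entry> latestNonDeleted(CompletableFuture<Entry> latest) {
            return latest.thenApply(e -> (e != null && e.isDeleted()) ? null : e);
        }

        public static void main(String[] args) throws Exception {
            System.out.println(latestNonDeleted(
                    CompletableFuture.completedFuture(new Entry(true))).get());  // prints: null
            System.out.println(latestNonDeleted(
                    CompletableFuture.completedFuture(new Entry(false))).get()); // prints an Entry instance
        }
    }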

The issue doesn't affect producers because the check was already
correct there.

I've also updated the transitive compatibility check to drop the
prefix of schemas that precede the deleted schema. Previously this
threw an NPE on the broker because it couldn't decode the deleted
schema.
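
As a worked example of that trimming rule, the sketch below keeps only the schemas added after the most recent deletion marker. The Entry record (Java 16+) is a hypothetical stand-in for SchemaAndMetadata; the real implementation is the checkCompatibilityWithAll change in the diff further down.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class TrimDeletedPrefixSketch {
        // Hypothetical stand-in for SchemaAndMetadata; only the deleted flag matters here.
        record Entry(String name, boolean deleted) {}

        // Keep only the entries after the most recent tombstone.
        static List<Entry> trimDeletedPrefix(List<Entry> history) {
            int lastIndex = history.size() - 1;
            for (int i = lastIndex; i >= 0; i--) {
                if (history.get(i).deleted()) {
                    return i == lastIndex
                            ? Collections.<Entry>emptyList()          // latest entry is the tombstone
                            : history.subList(i + 1, history.size());
                }
            }
            return history;                                           // no tombstone: keep the full history
        }

        public static void main(String[] args) {
            List<Entry> history = Arrays.asList(
                    new Entry("v1", false),
                    new Entry("v2", false),
                    new Entry("tombstone", true),
                    new Entry("v3", false));
            // Only v3 is compared against the new schema in a transitive check.
            System.out.println(trimDeletedPrefix(history));
        }
    }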

This issue was discovered through failures in the health check: the
check period (5 minutes) was longer than the GC period (60 seconds),
so the health-check topic was garbage collected between checks. I
would expect it to occur quite often in other scenarios as well.
ivankelly authored Jul 8, 2019
1 parent c15c867 commit d77980d
Showing 3 changed files with 130 additions and 14 deletions.
@@ -68,7 +68,13 @@ public class SchemaRegistryServiceImpl implements SchemaRegistryService {
@Override
@NotNull
public CompletableFuture<SchemaAndMetadata> getSchema(String schemaId) {
-        return getSchema(schemaId, SchemaVersion.Latest);
+        return getSchema(schemaId, SchemaVersion.Latest).thenApply((schema) -> {
+            if (schema != null && schema.schema.isDeleted()) {
+                return null;
+            } else {
+                return schema;
+            }
+        });
}

@Override
@@ -184,21 +190,35 @@ private CompletableFuture<Boolean> checkCompatibilityWithLatest(String schemaId,
SchemaCompatibilityStrategy strategy) {
return getSchema(schemaId)
.thenApply(
(existingSchema) ->
!(existingSchema == null || existingSchema.schema.isDeleted())
&& isCompatible(existingSchema, schema, strategy));
}

private CompletableFuture<Boolean> checkCompatibilityWithAll(String schemaId, SchemaData schema,
SchemaCompatibilityStrategy strategy) {
return getAllSchemas(schemaId)
-                .thenCompose(FutureUtils::collect)
-                .thenApply(schemaAndMetadataList -> schemaAndMetadataList
-                        .stream()
-                        .map(schemaAndMetadata -> schemaAndMetadata.schema)
-                        .collect(Collectors.toList()))
-                .thenApply(schemas -> compatibilityChecks.getOrDefault(schema.getType(), SchemaCompatibilityCheck.DEFAULT)
-                        .isCompatible(schemas, schema, strategy));
+                .thenCompose(FutureUtils::collect)
+                .thenApply(list -> {
+                        // Trim the prefix of schemas before the latest delete.
+                        int lastIndex = list.size() - 1;
+                        for (int i = lastIndex; i >= 0; i--) {
+                            if (list.get(i).schema.isDeleted()) {
+                                if (i == lastIndex) { // if the latest schema is a delete, there's no schemas to compare
+                                    return Collections.<SchemaAndMetadata>emptyList();
+                                } else {
+                                    return list.subList(i + 1, list.size());
+                                }
+                            }
+                        }
+                        return list;
+                    })
+                .thenApply(schemaAndMetadataList -> schemaAndMetadataList
+                        .stream()
+                        .map(schemaAndMetadata -> schemaAndMetadata.schema)
+                        .collect(Collectors.toList()))
+                .thenApply(schemas -> compatibilityChecks.getOrDefault(schema.getType(), SchemaCompatibilityCheck.DEFAULT)
+                        .isCompatible(schemas, schema, strategy));
}

interface Functions {
@@ -20,6 +20,7 @@

import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
+import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;

import java.time.Clock;
@@ -106,9 +107,7 @@ public void writeReadBackDeleteSchemaEntry() throws Exception {

deleteSchema(schemaId1, version(1));

-        SchemaData latest2 = getLatestSchema(schemaId1, version(1));
-
-        assertTrue(latest2.isDeleted());
+        assertNull(schemaRegistryService.getSchema(schemaId1).get());
}

@Test
@@ -0,0 +1,97 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.client.impl;

import com.google.common.collect.Sets;

import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.Reader;
import org.apache.pulsar.client.api.Schema;

import org.apache.pulsar.common.policies.data.ClusterData;
import org.apache.pulsar.common.policies.data.SchemaAutoUpdateCompatibilityStrategy;
import org.apache.pulsar.common.policies.data.TenantInfo;

import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

public class SchemaDeleteTest extends MockedPulsarServiceBaseTest {

private static final String subscription = "reader-sub";

@BeforeMethod
@Override
protected void setup() throws Exception {

super.internalSetup();
this.conf.setBrokerDeleteInactiveTopicsFrequencySeconds(5);

admin.clusters().createCluster("test",
new ClusterData("http://127.0.0.1:" + BROKER_WEBSERVICE_PORT));
admin.tenants().createTenant("my-property",
new TenantInfo(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test")));
admin.namespaces().createNamespace("my-property/my-ns", Sets.newHashSet("test"));
}

@AfterMethod
@Override
protected void cleanup() throws Exception {
super.internalCleanup();
}

@Test
public void createTopicDeleteTopicCreateTopic() throws Exception {
String namespace = "my-property/my-ns";
String topic = namespace + "/topic1";
String foobar = "foo";

try (Producer<String> producer =
pulsarClient.newProducer(Schema.STRING).topic(topic).create()) {
producer.send(foobar);
}

admin.topics().delete(topic);
admin.schemas().deleteSchema(topic);

// creating a subscriber will check the schema against the latest
// schema, which in this case should be a tombstone, which should
// behave as if the schema never existed
try (Reader<String> reader = pulsarClient.newReader(Schema.STRING)
.topic(topic).startMessageId(MessageId.latest).create()) {
}

admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(namespace,
SchemaAutoUpdateCompatibilityStrategy.BackwardTransitive);
admin.topics().delete(topic);
admin.schemas().deleteSchema(topic);

// with a transitive policy we should check all previous schemas. But we
// shouldn't check against those that were there before we deleted the schema.
try (Reader<DummyPojo> reader = pulsarClient.newReader(Schema.AVRO(DummyPojo.class))
.topic(topic).startMessageId(MessageId.latest).create()) {
}
}

public static class DummyPojo {
int foobar;
}
}
