Skip to content

Commit

Permalink
Revert "PHOENIX-4790 Simplify check for client side delete"
Browse files Browse the repository at this point in the history
This reverts commit 35366b3.
  • Loading branch information
JamesRTaylor committed Jul 13, 2018
1 parent 2b43bea commit 0af8b1e
Showing 1 changed file with 19 additions and 5 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
import org.apache.phoenix.execute.MutationState.RowMutationState;
import org.apache.phoenix.filter.SkipScanFilter;
import org.apache.phoenix.hbase.index.ValueGetter;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
Expand Down Expand Up @@ -481,7 +482,6 @@ else if (table.isTransactional() && connection.getSCN() != null) {
projectedColumns.add(column);
aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
}
boolean noQueryReqd = true;
// Project all non PK indexed columns so that we can do the proper index maintenance
for (PTable index : table.getIndexes()) {
IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
Expand All @@ -493,8 +493,6 @@ else if (table.isTransactional() && connection.getSCN() != null) {
boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
if(!projectedColumns.contains(column)) {
// We must run a query if any index contains a non pk column
noQueryReqd = false;
projectedColumns.add(column);
aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
}
Expand All @@ -514,7 +512,7 @@ Collections.<ParseNode> emptyList(), null, delete.getOrderBy(), delete.getLimit(
select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
}
final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
noQueryReqd &= !hasPreOrPostProcessing;
boolean noQueryReqd = !hasPreOrPostProcessing;
// No limit and no sub queries, joins, etc in where clause
// Can't run on same server for transactional data, as we need the row keys for the data
// that is being upserted for conflict detection purposes.
Expand Down Expand Up @@ -553,8 +551,24 @@ Collections.<ParseNode> emptyList(), null, delete.getOrderBy(), delete.getLimit(
}

runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;


// We need to have all indexed columns available in all immutable indexes in order
// to generate the delete markers from the query. We also cannot have any filters
// except for our SkipScanFilter for point lookups.
A simple check of the non-existence of a where clause in the parse node is not sufficient, as the where clause
may have been optimized out. Instead, we check that each plan's scan has either no filter or only a SkipScanFilter.
// If we can generate a plan for every index, that means all the required columns are available in every index,
// hence we can drive the delete from any of the plans.
noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
int queryPlanIndex = 0;
while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
QueryPlan plan = queryPlans.get(queryPlanIndex++);
StatementContext context = plan.getContext();
noQueryReqd &= (!context.getScan().hasFilter()
|| context.getScan().getFilter() instanceof SkipScanFilter)
&& context.getScanRanges().isPointLookup();
}

final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);

Expand Down

0 comments on commit 0af8b1e

Please sign in to comment.