Skip to content
Open
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.RowValueConstructorParseNode;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;
Expand Down Expand Up @@ -247,17 +248,19 @@ public CompiledOffset getRVCOffset(StatementContext context, FilterableStatement
// check to see if this was a single key expression
ScanRanges scanRanges = context.getScanRanges();

// We do not generate a point lookup today in Phoenix if the rowkey has a trailing null;
// instead, we generate a range scan.
// Always generate point lookup for RVC Offset
if (!scanRanges.isPointLookup()) {
// Since we use a range scan to guarantee we get only the null value and the upper bound is
// unset this suffices
// sanity check
if (!rowKeyColumnExpressionOutput.isTrailingNull()) {
throw new RowValueConstructorOffsetNotCoercibleException(
"RVC Offset must be a point lookup.");
}
key = scanRanges.getScanRange().getUpperRange();
throw new RowValueConstructorOffsetNotCoercibleException(
"RVC Offset must be a point lookup.");
}
if (rowKeyColumnExpressionOutput.isTrailingNull()) {
// Handle trailing nulls in RVC offset by appending null byte at the end to generate immediate
// next key
key = scanRanges.getScanRange().getLowerRange();
byte[] keyCopy = new byte[key.length + 1];
System.arraycopy(key, 0, keyCopy, 0, key.length);
keyCopy[key.length] = QueryConstants.SEPARATOR_BYTE;
key = keyCopy;
Comment on lines +259 to +263
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Any chance we can use ByteUtil.nextKey() to generate next key?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we use ByteUtil.nextKey() then we can miss some values that should be returned. For example, consider the query SELECT * FROM TABLE OFFSET (PK1, PK2) = ('1', null). In this case we would also want to return values such as ('1', '1') or ('1', '10'). But if we use ByteUtil.nextKey() on the key output from ScanRanges, then OFFSET will show all the rows starting from ('2', null) and we would miss the rows starting with '1'.

There is an existing IT for exact scenario I described above: RowValueConstructorOffsetIT#rvcOffsetTrailingNullKeyTest().

} else {
RowKeySchema.RowKeySchemaBuilder builder =
new RowKeySchema.RowKeySchemaBuilder(columns.size());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,9 @@ public static ScanRanges create(RowKeySchema schema, List<List<KeyRange>> ranges
TimeRange rowTimestampRange = getRowTimestampColumnRange(ranges, schema, rowTimestampColIndex);
boolean isPointLookup = isPointLookup(schema, ranges, slotSpan, useSkipScan);
if (isPointLookup) {
// Do this before transforming the ranges into list of key ranges with single slot
// Once the list is transformed into singleton list, IS_NULL_RANGE is no longer retained.
boolean isPointLookupWithTrailingNulls = isPointLookupWithTrailingNulls(ranges);
// TODO: consider keeping original to use for serialization as it would be smaller?
List<byte[]> keys = ScanRanges.getPointKeys(ranges, slotSpan, schema, nBuckets);
List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
Expand All @@ -117,7 +120,9 @@ public static ScanRanges create(RowKeySchema schema, List<List<KeyRange>> ranges
useSkipScan = keyRanges.size() > 1;
// Treat as binary if descending because we've got a separator byte at the end
// which is not part of the value.
if (keys.size() > 1 || hasTrailingDescSeparatorByte(schema)) {
if (
keys.size() > 1 || hasTrailingDescSeparatorByte(schema) || isPointLookupWithTrailingNulls
) {
schema = SchemaUtil.VAR_BINARY_SCHEMA;
slotSpan = ScanUtil.SINGLE_COLUMN_SLOT_SPAN;
} else {
Expand Down Expand Up @@ -618,16 +623,34 @@ private static boolean isPointLookup(RowKeySchema schema, List<List<KeyRange>> r
return false;
}
for (KeyRange keyRange : orRanges) {
// Special case for single trailing IS NULL. We cannot consider this as a point key because
// we strip trailing nulls when we form the key.
if (!keyRange.isSingleKey() || (i == lastIndex && keyRange == KeyRange.IS_NULL_RANGE)) {
// COL IS NULL on trailing nullable PK column can be treated as a point lookup
// because:
// 1. Trailing nulls are stripped when storing the key (no trailing separator bytes)
// 2. getPointKeys() generates the correct key without trailing null bytes
// 3. The generated key matches exactly what's stored for rows with trailing NULL
// 4. Not handling legacy tables impacted via bug PHOENIX-2067
if (
!keyRange.isSingleKey() || lastIndex == i && keyRange == KeyRange.IS_NULL_RANGE
&& !schema.rowKeyOrderOptimizable()
) {
return false;
}
}
}
return true;
}

private static boolean isPointLookupWithTrailingNulls(List<List<KeyRange>> ranges) {
  // Only the last PK slot matters: a trailing IS NULL range means the point key
  // is formed with its trailing null stripped.
  List<KeyRange> trailingSlot = ranges.get(ranges.size() - 1);
  for (int idx = 0; idx < trailingSlot.size(); idx++) {
    if (trailingSlot.get(idx) == KeyRange.IS_NULL_RANGE) {
      return true;
    }
  }
  return false;
}

private static boolean incrementKey(List<List<KeyRange>> slots, int[] position) {
int idx = slots.size() - 1;
while (idx >= 0 && (position[idx] = (position[idx] + 1) % slots.get(idx).size()) == 0) {
Expand Down Expand Up @@ -656,6 +679,10 @@ private static List<byte[]> getPointKeys(List<List<KeyRange>> ranges, int[] slot
do {
length = ScanUtil.setKey(schema, ranges, slotSpan, position, Bound.LOWER, key, offset, offset,
ranges.size(), offset);
// Handle the case where someone specifies IS NULL on all PK columns.
if (length == 0) {
continue;
}
if (isSalted) {
key[0] = SaltingUtil.getSaltingByte(key, offset, length, bucketNum);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -484,13 +484,26 @@ public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[]
slotEndIndex, slotStartIndex);
}

private static boolean doesSlotsCoverAllColumnsWithoutMultiSpan(RowKeySchema schema,
  List<List<KeyRange>> slots, int[] slotSpan) {
  // True only when no slot spans multiple PK columns (all spans are zero)
  // and there is exactly one slot per schema field.
  long totalSpan = 0;
  for (int span : slotSpan) {
    totalSpan += span;
  }
  return totalSpan == 0 && slots.size() == schema.getMaxFields();
}

public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[] slotSpan,
int[] position, Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex,
int schemaStartIndex) {
int offset = byteOffset;
boolean lastInclusiveUpperSingleKey = false;
boolean allInclusiveLowerSingleKey = bound == Bound.LOWER;
boolean anyInclusiveUpperRangeKey = false;
boolean lastUnboundUpper = false;
boolean slotsCoverAllColumnsWithoutMultiSpan =
doesSlotsCoverAllColumnsWithoutMultiSpan(schema, slots, slotSpan);
int trailingNullCount = 0;
// The index used for slots should be incremented by 1,
// but the index for the field it represents in the schema
// should be incremented by 1 + value in the current slotSpan index
Expand Down Expand Up @@ -521,6 +534,13 @@ public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[]
System.arraycopy(bytes, 0, key, offset, bytes.length);
offset += bytes.length;

if (bytes.length == 0) {
trailingNullCount++;
} else {
trailingNullCount = 0;
}
allInclusiveLowerSingleKey &= range.isSingleKey();

/*
* We must add a terminator to a variable length key even for the last PK column if the lower
* key is non inclusive or the upper key is inclusive. Otherwise, we'd be incrementing the key
Expand Down Expand Up @@ -637,10 +657,15 @@ public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[]
// after the table has data, in which case there won't be a separator
// byte.
if (bound == Bound.LOWER) {
// Remove trailing separator bytes for DESC keys only if they are trailing nulls for point
// lookups and schema is rowKeyOrderOptimizable.
while (
--i >= schemaStartIndex && offset > byteOffset
&& !(field = schema.getField(--fieldIndex)).getDataType().isFixedWidth()
&& field.getSortOrder() == SortOrder.ASC && hasSeparatorBytes(key, field, offset)
&& hasSeparatorBytes(key, field, offset)
&& (field.getSortOrder() == SortOrder.DESC && schema.rowKeyOrderOptimizable()
&& slotsCoverAllColumnsWithoutMultiSpan && allInclusiveLowerSingleKey
&& trailingNullCount-- > 0 || field.getSortOrder() == SortOrder.ASC)
) {
if (field.getDataType() != PVarbinaryEncoded.INSTANCE) {
offset--;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,29 +33,21 @@
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.HashMap;
import org.apache.hadoop.hbase.TableName;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.ExplainPlanAttributes;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.TestUtil;
import org.bson.RawBsonDocument;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(NeedsOwnMiniClusterTest.class)
public class WhereOptimizerForArrayAnyIT extends BaseTest {
@BeforeClass
public static void setup() throws Exception {
setUpTestDriver(new ReadOnlyProps(new HashMap<String, String>()));
}
public class WhereOptimizerForArrayAnyIT extends WhereOptimizerForArrayAnyITBase {

@Test
public void testArrayAnyComparisonForNonPkColumn() throws Exception {
Expand Down Expand Up @@ -784,37 +776,12 @@ private void assertPointLookupsAreNotGenerated(PreparedStatement stmt) throws SQ
assertEquals("FULL SCAN ", planAttributes.getExplainScanType());
}

private void assertPointLookupsAreGenerated(PreparedStatement stmt, int noOfPointLookups)
  throws SQLException {
  // Optimize the prepared statement, then delegate to the QueryPlan-based overload.
  final QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery();
  assertPointLookupsAreGenerated(plan, noOfPointLookups);
}

private void assertPointLookupsAreGenerated(Statement stmt, String selectSql,
  int noOfPointLookups) throws SQLException {
  // Optimize the raw SQL through the Phoenix statement, then verify via the
  // QueryPlan-based overload.
  final QueryPlan plan = stmt.unwrap(PhoenixStatement.class).optimizeQuery(selectSql);
  assertPointLookupsAreGenerated(plan, noOfPointLookups);
}

private void assertSkipScanIsGenerated(PreparedStatement stmt, int skipListSize)
  throws SQLException {
  // A skip scan over N keys is reported in the explain plan as "SKIP SCAN ON N KEY[S]".
  ExplainPlanAttributes planAttributes = stmt.unwrap(PhoenixPreparedStatement.class)
    .optimizeQuery().getExplainPlan().getPlanStepsAsAttributes();
  String keySuffix = skipListSize > 1 ? "S " : " ";
  assertEquals("SKIP SCAN ON " + skipListSize + " KEY" + keySuffix,
    planAttributes.getExplainScanType());
}

private void assertPointLookupsAreGenerated(QueryPlan queryPlan, int noOfPointLookups)
  throws SQLException {
  // A point lookup over N keys is reported in the explain plan as
  // "POINT LOOKUP ON N KEY[S]".
  ExplainPlanAttributes planAttributes =
    queryPlan.getExplainPlan().getPlanStepsAsAttributes();
  String keySuffix = noOfPointLookups > 1 ? "S " : " ";
  assertEquals("POINT LOOKUP ON " + noOfPointLookups + " KEY" + keySuffix,
    planAttributes.getExplainScanType());
}

private void createTableASCPkColumns(String tableName) throws SQLException {
String ddl = "CREATE TABLE " + tableName + " (" + "pk1 INTEGER NOT NULL, " + "pk2 VARCHAR(3), "
+ "col1 VARCHAR, " + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)" + ")";
Expand Down Expand Up @@ -887,5 +854,4 @@ private static RawBsonDocument getBsonDocument2() {
+ " \"attr_0\" : \"str_val_0\"\n" + "}";
return RawBsonDocument.parse(json);
}

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.end2end;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.ExplainPlanAttributes;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;

/**
* Base class for WhereOptimizerForArrayAnyIT tests containing shared helper methods.
*/
/**
 * Base class for WhereOptimizerForArrayAnyIT tests containing shared helper methods. Each
 * helper optimizes a statement and asserts which scan type (point lookup, skip scan, range
 * scan, or degenerate) the explain plan reports.
 */
public abstract class WhereOptimizerForArrayAnyITBase extends BaseTest {

  @BeforeClass
  public static void setup() throws Exception {
    setUpTestDriver(new ReadOnlyProps(new HashMap<String, String>()));
  }

  /**
   * Optimizes the prepared statement and returns the explain-plan attributes of the
   * resulting query plan. Shared by the scan-type assertion helpers below to avoid
   * repeating the unwrap/optimize/explain boilerplate.
   * @param stmt prepared statement to optimize
   * @return explain-plan attributes of the optimized plan
   * @throws SQLException if optimization or explain-plan generation fails
   */
  private static ExplainPlanAttributes getPlanAttributes(PreparedStatement stmt)
    throws SQLException {
    QueryPlan queryPlan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery();
    return queryPlan.getExplainPlan().getPlanStepsAsAttributes();
  }

  /** Asserts the optimized plan for {@code stmt} is a point lookup on the given key count. */
  protected void assertPointLookupsAreGenerated(PreparedStatement stmt, int noOfPointLookups)
    throws SQLException {
    QueryPlan queryPlan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery();
    assertPointLookupsAreGenerated(queryPlan, noOfPointLookups);
  }

  /** Asserts {@code queryPlan} reports a point lookup on the given key count. */
  protected void assertPointLookupsAreGenerated(QueryPlan queryPlan, int noOfPointLookups)
    throws SQLException {
    ExplainPlanAttributes planAttributes = queryPlan.getExplainPlan().getPlanStepsAsAttributes();
    // The plan prints "KEYS" for more than one key, "KEY" for exactly one.
    String expectedScanType =
      "POINT LOOKUP ON " + noOfPointLookups + " KEY" + (noOfPointLookups > 1 ? "S " : " ");
    assertEquals(expectedScanType, planAttributes.getExplainScanType());
  }

  /** Asserts the optimized plan for {@code stmt} is a skip scan on the given key-list size. */
  protected void assertSkipScanIsGenerated(PreparedStatement stmt, int skipListSize)
    throws SQLException {
    // The plan prints "KEYS" for more than one key, "KEY" for exactly one.
    String expectedScanType =
      "SKIP SCAN ON " + skipListSize + " KEY" + (skipListSize > 1 ? "S " : " ");
    assertEquals(expectedScanType, getPlanAttributes(stmt).getExplainScanType());
  }

  /** Asserts the optimized plan for {@code stmt} is a range scan. */
  protected void assertRangeScanIsGenerated(PreparedStatement stmt) throws SQLException {
    assertEquals("RANGE SCAN ", getPlanAttributes(stmt).getExplainScanType());
  }

  /** Asserts the optimized plan for {@code stmt} is degenerate (no scan type reported). */
  protected void assertDegenerateScanIsGenerated(PreparedStatement stmt) throws SQLException {
    assertNull(getPlanAttributes(stmt).getExplainScanType());
  }
}
Loading