diff --git a/dev/replace_sources_with_relocated.sh b/dev/replace_sources_with_relocated.sh
new file mode 100755
index 00000000000..e54f627a0cf
--- /dev/null
+++ b/dev/replace_sources_with_relocated.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# This script replaces the normal sources with the ones where the packages are replaced for HBase 3.0
+# The purpose is to enable debugging from an IDE without having to change the source directory settings
+# Make sure to never commit the changes this script makes.
+
+# Usage:
+# 1. Activate the Hadoop3 maven profile in your IDE (and deactivate the default)
+# 2. Make sure that you have no uncommitted changes
+# 3. Run "mvn clean package -am -pl phoenix-core -Dhbase.profile=3.0 -DskipTests"
+# 4. Run this script
+# 5. Work with the source in the IDE
+# 6. get a diff of your fixes
+# 7. Run "git reset --hard"
+# 8. Re-apply your changes.
+
+orig_dir=$(pwd)
+cd "$(dirname "$0")"/..
+cp -r phoenix-core/target/generated-sources/replaced/* phoenix-core/src
+cp -r phoenix-core-client/target/generated-sources/replaced/* phoenix-core-client/src
+cp -r phoenix-core-server/target/generated-sources/replaced/* phoenix-core-server/src
+
+cd "$orig_dir"
diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
index 07769c9f685..a9e4c526058 100644
--- a/phoenix-core-client/pom.xml
+++ b/phoenix-core-client/pom.xml
@@ -68,18 +68,10 @@
org.apache.hbase
hbase-hadoop-compat
-
- org.apache.hbase
- hbase-hadoop2-compat
-
org.apache.hbase
hbase-zookeeper
-
- org.apache.hbase
- hbase-protocol
-
org.apache.hbase
hbase-protocol-shaded
@@ -279,7 +271,7 @@
hbaseMinor = Integer.parseInt(versionMatcher.group(2));
hbasePatch = Integer.parseInt(versionMatcher.group(3));
- hbaseMajor == 2 && (
+ (hbaseMajor == 2 && (
("${hbase.compat.version}".equals("2.4.1")
&& hbaseMinor == 4
&& hbasePatch >=1)
@@ -291,7 +283,11 @@
&& hbasePatch >=4)
|| ("${hbase.compat.version}".equals("2.6.0")
&& hbaseMinor == 6
- && hbasePatch >=0)
+ && hbasePatch >=0))
+ || (hbaseMajor == 3 && (
+ "${hbase.compat.version}".equals("3.0.0")
+ && hbaseMinor == 0
+ && hbasePatch >=0))
)
@@ -439,5 +435,173 @@
+ ${actualSourceDirectory}
+
+
+ hbase-2.x
+
+
+ hbase.profile
+
+ !3.0
+
+
+
+ src/main/java
+
+
+
+
+
+
+
+ org.apache.hbase
+ hbase-protocol
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-antlr-source
+
+ add-source
+
+ generate-sources
+
+
+ ${antlr-output.dir}
+ ${antlr-input.dir}
+
+
+
+
+
+
+
+
+
+ hbase-3.x
+
+
+ hbase.profile
+ 3.0
+
+
+
+ 4.31.1
+ target/generated-sources/replaced/main/java
+
+
+
+
+
+ com.google.code.maven-replacer-plugin
+ replacer
+
+
+ replace-generated-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/target/generated-sources/protobuf
+
+ **/*.java
+
+ true
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ replace-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+ **/*.java
+
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ copy-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-antlr-source
+
+ add-source
+
+ generate-sources
+
+
+ ${antlr-output.dir}
+ ${antlr-input.dir}
+
+
+
+
+
+
+
+
+
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
index 5e3fc6de068..fc7c638e28a 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
@@ -17,42 +17,23 @@
*/
package org.apache.hadoop.hbase.ipc.controller;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.phoenix.compat.hbase.CompatRPCControllerFactory;
/**
* {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed in its
* own queue.
*/
-public class ClientRpcControllerFactory extends RpcControllerFactory {
+public class ClientRpcControllerFactory extends CompatRPCControllerFactory {
public ClientRpcControllerFactory(Configuration conf) {
super(conf);
}
@Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(CellScanner cellScanner) {
- HBaseRpcController delegate = super.newController(cellScanner);
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(List cellIterables) {
- HBaseRpcController delegate = super.newController(cellIterables);
- return getController(delegate);
- }
-
- private HBaseRpcController getController(HBaseRpcController delegate) {
+ protected HBaseRpcController getController(HBaseRpcController delegate) {
return new MetadataRpcController(delegate, conf);
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
index 21ffcfcbde2..178179b32aa 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java
@@ -17,43 +17,23 @@
*/
package org.apache.hadoop.hbase.ipc.controller;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.phoenix.compat.hbase.CompatRPCControllerFactory;
/**
* RpcControllerFactory that should only be used when creating Table for making remote RPCs to the
* region servers hosting global mutable index table regions. This controller factory shouldn't be
* globally configured anywhere and is meant to be used only internally by Phoenix indexing code.
*/
-public class InterRegionServerIndexRpcControllerFactory extends RpcControllerFactory {
+public class InterRegionServerIndexRpcControllerFactory extends CompatRPCControllerFactory {
public InterRegionServerIndexRpcControllerFactory(Configuration conf) {
super(conf);
}
@Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(CellScanner cellScanner) {
- HBaseRpcController delegate = super.newController(cellScanner);
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(List cellIterables) {
- HBaseRpcController delegate = super.newController(cellIterables);
- return getController(delegate);
- }
-
- private HBaseRpcController getController(HBaseRpcController delegate) {
+ protected HBaseRpcController getController(HBaseRpcController delegate) {
// construct a chain of controllers: metadata, index and standard controller
IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
return new MetadataRpcController(indexRpcController, conf);
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
index afbd77c65b4..7d60e2af35f 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
@@ -17,42 +17,22 @@
*/
package org.apache.hadoop.hbase.ipc.controller;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.phoenix.compat.hbase.CompatRPCControllerFactory;
/**
* {@link RpcControllerFactory} that sets the appropriate priority of server-server RPC calls
* destined for Phoenix SYSTEM tables.
*/
-public class ServerRpcControllerFactory extends RpcControllerFactory {
+public class ServerRpcControllerFactory extends CompatRPCControllerFactory {
public ServerRpcControllerFactory(Configuration conf) {
super(conf);
}
- @Override
- public HBaseRpcController newController() {
- HBaseRpcController delegate = super.newController();
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(CellScanner cellScanner) {
- HBaseRpcController delegate = super.newController(cellScanner);
- return getController(delegate);
- }
-
- @Override
- public HBaseRpcController newController(List cellIterables) {
- HBaseRpcController delegate = super.newController(cellIterables);
- return getController(delegate);
- }
-
- private HBaseRpcController getController(HBaseRpcController delegate) {
+ protected HBaseRpcController getController(HBaseRpcController delegate) {
return new ServerRpcController(delegate, conf);
}
diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
index 3ff84ef353d..30181453233 100644
--- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
+++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java
@@ -21,8 +21,9 @@
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
/**
- * {@link RpcControllerFactory} that should only be used when making server-server remote RPCs to
- * the region servers hosting Phoenix SYSTEM tables.
+ * Factory that should only be used when making server-server remote RPCs to the region servers
+ * hosting Phoenix SYSTEM tables. Despite the name, this does NOT implement
+ * {@link RpcControllerFactory}
*/
public class ServerSideRPCControllerFactory {
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java
index 8fd17219aea..e36ddfbe480 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java
@@ -32,6 +32,7 @@
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.util.Writables;
@@ -89,39 +90,47 @@ public void filterRowCells(List kvs) throws IOException {
viewIndexIdDataTypeCell.getValueOffset(), viewIndexIdDataTypeCell.getValueLength(),
PInteger.INSTANCE, SortOrder.ASC);
}
- if (this.clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) {
- /*
- * For pre-4.15 client select query cannot include VIEW_INDEX_ID_DATA_TYPE as part of the
- * projected columns; for this reason, the TYPE will always be NULL. Since the pre-4.15
- * client always assume the VIEW_INDEX_ID column is type of SMALLINT, we need to retrieve
- * the BIGINT cell to SMALLINT cell. VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID(Cell
- * representation of the data) NULL, SMALLINT -> DO NOT CONVERT SMALLINT, SMALLINT -> DO NOT
- * CONVERT BIGINT, BIGINT -> RETRIEVE AND SEND SMALLINT BACK
- */
- if (
- type == NULL_DATA_TYPE_VALUE
- && viewIndexIdCell.getValueLength() > VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN
- ) {
- Cell keyValue =
- ViewIndexIdRetrieveUtil.getRetrievedViewIndexIdCell(viewIndexIdCell, false);
- Collections.replaceAll(kvs, viewIndexIdCell, keyValue);
- }
- } else {
- /*
- * For post-4.15 client select query needs to include VIEW_INDEX_ID_DATA_TYPE as part of the
- * projected columns, and VIEW_INDEX_ID depends on it. VIEW_INDEX_ID_DATA_TYPE,
- * VIEW_INDEX_ID(Cell representation of the data) NULL, SMALLINT -> RETRIEVE AND SEND BIGINT
- * BACK SMALLINT, SMALLINT -> RETRIEVE AND SEND BIGINT BACK BIGINT, BIGINT -> DO NOT
- * RETRIEVE
- */
- if (
- type != Types.BIGINT
- && viewIndexIdCell.getValueLength() < VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN
- ) {
- Cell keyValue =
- ViewIndexIdRetrieveUtil.getRetrievedViewIndexIdCell(viewIndexIdCell, true);
- Collections.replaceAll(kvs, viewIndexIdCell, keyValue);
+ try {
+ if (this.clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) {
+ /*
+ * For pre-4.15 client select query cannot include VIEW_INDEX_ID_DATA_TYPE as part of the
+ * projected columns; for this reason, the TYPE will always be NULL. Since the pre-4.15
+ * client always assume the VIEW_INDEX_ID column is type of SMALLINT, we need to retrieve
+ * the BIGINT cell to SMALLINT cell. VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID(Cell
+ * representation of the data) NULL, SMALLINT -> DO NOT CONVERT SMALLINT, SMALLINT -> DO
+ * NOT CONVERT BIGINT, BIGINT -> RETRIEVE AND SEND SMALLINT BACK
+ */
+ if (
+ type == NULL_DATA_TYPE_VALUE
+ && viewIndexIdCell.getValueLength() > VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN
+ ) {
+
+ Cell keyValue = ViewIndexIdRetrieveUtil
+ .getRetrievedViewIndexIdCell((ExtendedCell) viewIndexIdCell, false);
+ Collections.replaceAll(kvs, viewIndexIdCell, keyValue);
+ }
+ } else {
+ /*
+ * For post-4.15 client select query needs to include VIEW_INDEX_ID_DATA_TYPE as part of
+ * the projected columns, and VIEW_INDEX_ID depends on it. VIEW_INDEX_ID_DATA_TYPE,
+ * VIEW_INDEX_ID(Cell representation of the data) NULL, SMALLINT -> RETRIEVE AND SEND
+ * BIGINT BACK SMALLINT, SMALLINT -> RETRIEVE AND SEND BIGINT BACK BIGINT, BIGINT -> DO
+ * NOT RETRIEVE
+ */
+ if (
+ type != Types.BIGINT
+ && viewIndexIdCell.getValueLength() < VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN
+ ) {
+ Cell keyValue = ViewIndexIdRetrieveUtil
+ .getRetrievedViewIndexIdCell((ExtendedCell) viewIndexIdCell, true);
+ Collections.replaceAll(kvs, viewIndexIdCell, keyValue);
+ }
}
+ } catch (ClassCastException e) {
+ // As indicated in the Filter interface comments, Filters always work on ExtendedCells.
+ // Throw an IOException if something goes wrong.
+ throw new IOException(
+ "Filter got Cell that is not an ExtendedCell. This should not happen.", e);
}
}
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java
index 2ab69fbeff0..0015895ddc8 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java
@@ -21,6 +21,8 @@
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -40,7 +42,8 @@ public void addAll(Mutation stored) {
byte[] family = kvs.getKey();
List list = getKeyValueList(family, kvs.getValue().size());
list.addAll(kvs.getValue());
- familyMap.put(family, list);
+ // override generics to fix the Cell/ExtendedCell type changes between HBase 2/3
+ familyMap.put(family, (List) list);
}
// add all the attributes, not overriding already stored ones
@@ -52,13 +55,19 @@ public void addAll(Mutation stored) {
}
private List getKeyValueList(byte[] family, int hint) {
- List list = familyMap.get(family);
+ // override generics to fix the Cell/ExtendedCell type changes between HBase 2/3
+ List list = (List) (familyMap.get(family));
if (list == null) {
list = new ArrayList(hint);
}
return list;
}
+ // No @Override to maintain Hadoop 2 compatibility
+ public CellBuilder getCellBuilder(CellBuilderType cellBuilderType) {
+ throw new IllegalArgumentException("MultiMutation does not implement a CellBuilder");
+ }
+
@Override
public byte[] getRow() {
return this.rowKey.copyBytesIfNecessary();
@@ -80,5 +89,4 @@ public boolean equals(Object obj) {
MultiMutation other = (MultiMutation) obj;
return rowKey.equals(other.rowKey);
}
-
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
index 60a9ba6e30d..582de9e422a 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
@@ -17,14 +17,15 @@
*/
package org.apache.phoenix.hbase.index;
-import org.apache.hadoop.hbase.Cell;
+import java.io.IOException;
+import org.apache.hadoop.hbase.ExtendedCell;
-public class OffsetCell implements Cell {
+public class OffsetCell implements ExtendedCell {
- private Cell cell;
+ private ExtendedCell cell;
private int offset;
- public OffsetCell(Cell cell, int offset) {
+ public OffsetCell(ExtendedCell cell, int offset) {
this.cell = cell;
this.offset = offset;
}
@@ -134,4 +135,19 @@ public int getSerializedSize() {
return cell.getSerializedSize() - offset;
}
+ @Override
+ public void setSequenceId(long seqId) throws IOException {
+ cell.setSequenceId(seqId);
+ }
+
+ @Override
+ public void setTimestamp(long ts) throws IOException {
+ cell.setTimestamp(ts);
+ }
+
+ @Override
+ public void setTimestamp(byte[] ts) throws IOException {
+ cell.setTimestamp(ts);
+ }
+
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdmin.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdmin.java
index e1db43ae9f4..06db71cf602 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdmin.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdmin.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.phoenix.jdbc.ClusterRoleRecord.RegistryType;
import org.apache.phoenix.util.JDBCUtil;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
@@ -91,7 +92,7 @@ public PhoenixHAAdmin(String zkUrl, Configuration conf,
Preconditions.checkNotNull(zkUrl);
Preconditions.checkNotNull(conf);
Preconditions.checkNotNull(highAvailibilityCuratorProvider);
- this.zkUrl = JDBCUtil.formatUrl(zkUrl);
+ this.zkUrl = JDBCUtil.formatUrl(zkUrl, RegistryType.ZK);
this.conf = conf;
conf.iterator().forEachRemaining(k -> properties.setProperty(k.getKey(), k.getValue()));
this.highAvailibilityCuratorProvider = highAvailibilityCuratorProvider;
@@ -109,7 +110,7 @@ public static String getLocalZkUrl(Configuration conf) {
}
String portStr = conf.get(HConstants.ZOOKEEPER_CLIENT_PORT);
- int port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
+ int port = HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT;
if (portStr != null) {
try {
port = Integer.parseInt(portStr);
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java
index 9010feea3d0..bccf2e815d9 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hbase.security.User;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.slf4j.LoggerFactory;
/**
* ConnectionInfo class for org.apache.hadoop.hbase.client.ZKConnectionRegistry This used to be the
@@ -33,6 +34,8 @@
*/
public class ZKConnectionInfo extends ConnectionInfo {
+ private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ZKConnectionInfo.class);
+
public static final String ZK_REGISTRY_NAME =
"org.apache.hadoop.hbase.client.ZKConnectionRegistry";
@@ -295,17 +298,27 @@ protected void normalize() throws SQLException {
}
}
+ LOGGER.debug("zkPort from connection url: {}", zkPort);
+
// Normalize connInfo so that a url explicitly specifying versus implicitly inheriting
// the default values will both share the same ConnectionQueryServices.
if (zkPort == null) {
String zkPortString = get(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT);
+ LOGGER.debug("zkPort from CLIENT_ZOOKEEPER_CLIENT_PORT: {}", zkPortString);
+
if (zkPortString == null) {
zkPortString = get(HConstants.ZOOKEEPER_CLIENT_PORT);
+ LOGGER.debug("zkPort from ZOOKEEPER_CLIENT_PORT: {}", zkPortString);
+
}
if (zkPortString == null) {
zkPort = HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT;
+ LOGGER.debug("zkPort defaulted to: {}", zkPort);
+
} else {
zkPort = Integer.parseInt(zkPortString);
+ LOGGER.debug("zkPort parsed as: {}", zkPort);
+
}
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
index fccf1bbe32a..23d29c73cfc 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.util.StringUtils;
import org.apache.phoenix.compat.hbase.ByteStringer;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.CreateViewAddChildLinkRequest;
import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
import org.apache.phoenix.coprocessor.generated.PTableProtos;
@@ -125,7 +126,7 @@ private static List getMutations(List mutations) throws IO
List result = new ArrayList();
for (ByteString mutation : mutations) {
MutationProto mProto = MutationProto.parseFrom(mutation);
- result.add(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto));
+ result.add(CompatUtil.toMutation(mProto));
}
return result;
}
@@ -139,7 +140,7 @@ public static MutationProto toProto(Mutation mutation) throws IOException {
} else {
throw new IllegalArgumentException("Only Put and Delete are supported");
}
- return org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(type, mutation);
+ return CompatUtil.toMutation(type, mutation);
}
public static ServerCachingProtos.ImmutableBytesWritable toProto(ImmutableBytesWritable w) {
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/CompiledConditionalTTLExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/CompiledConditionalTTLExpression.java
index 6c66938de23..7919fa46e20 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/CompiledConditionalTTLExpression.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/CompiledConditionalTTLExpression.java
@@ -35,8 +35,8 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.compat.hbase.ByteStringer;
import org.apache.phoenix.coprocessor.generated.PTableProtos;
import org.apache.phoenix.coprocessor.generated.ServerCachingProtos;
import org.apache.phoenix.expression.Expression;
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 99ffe141566..f5be4065666 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -4729,9 +4729,9 @@ public MutationState addColumn(PTable table, List origColumnDefs,
/**
* To check if TTL is defined at any of the child below we are checking it at
* {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl#mutateColumn(List, ColumnMutator, int, PTable, PTable, boolean)}
- * level where in function
- * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl# validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], byte[], byte[], List, int)}
- * we are already traversing through allDescendantViews.
+ * level where in function {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl#
+ * validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], byte[],
+ * byte[], List, int)} we are already traversing through allDescendantViews.
*/
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 5396fc70f8c..fbf016d0a22 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -408,14 +408,18 @@ public static void conditionallyAddTagsToPutCells(Put somePut, byte[] family, by
cell.getQualifierLength(), qualifier, 0, qualifier.length) == 0
&& (valueArray == null || !CellUtil.matchingValue(cell, valueArray))
) {
- ExtendedCell extendedCell =
- cellBuilder.setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
- .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
- .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
- cell.getQualifierLength())
- .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
- .setTimestamp(cell.getTimestamp()).setType(cell.getType())
- .setTags(TagUtil.concatTags(tagArray, cell)).build();
+ // This is a safety play. In all current versions
+ // org.apache.hadoop.hbase.client.Mutation.getFamilyCellMap() returns ExtendedCells only.
+ byte[] concatTags = (cell instanceof ExtendedCell)
+ ? TagUtil.concatTags(tagArray, (ExtendedCell) cell)
+ : tagArray;
+ ExtendedCell extendedCell = cellBuilder
+ .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+ .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
+ .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
+ cell.getQualifierLength())
+ .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
+ .setTimestamp(cell.getTimestamp()).setType(cell.getType()).setTags(concatTags).build();
// Replace existing cell with a cell that has the custom tags list
newCells.add(extendedCell);
} else {
@@ -583,7 +587,7 @@ public static KeyValue getMutationValue(Mutation headerRow, byte[] key, KeyValue
List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
if (kvs != null) {
for (Cell cell : kvs) {
- KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
+ KeyValue kv = PhoenixKeyValueUtil.ensureKeyValue(cell);
if (builder.compareQualifier(kv, key, 0, key.length) == 0) {
return kv;
}
@@ -597,7 +601,7 @@ public static boolean setMutationValue(Mutation headerRow, byte[] key, KeyValueB
List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
if (kvs != null) {
for (Cell cell : kvs) {
- KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
+ KeyValue kv = PhoenixKeyValueUtil.ensureKeyValue(cell);
if (builder.compareQualifier(kv, key, 0, key.length) == 0) {
KeyValueBuilder.addQuietly(headerRow, keyValue);
return true;
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
index 4f4bbffa5e8..5c73965197e 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Mutation;
@@ -215,7 +216,13 @@ public static KeyValue ensureKeyValue(Cell c) {
if (c instanceof KeyValue) {
return (KeyValue) c;
} else {
- return KeyValueUtil.copyToNewKeyValue(c);
+ // TODO how to handle this ? The main issue is that Cell in HBase 3 no longer has sequenceId.
+ // Normally, these off-heap to on-heap copy methods are only to be called on the server side,
+ // where we're not even supposed to see non-ExtendedCells.
+ // This currently errors out if it encounters non-ExtendedCells, but we could also just create
+ // a
+ // KeyValue with a 0 sequenceId.
+ return KeyValueUtil.copyToNewKeyValue((ExtendedCell) c);
}
}
@@ -247,7 +254,13 @@ public static KeyValue maybeCopyCell(Cell c) {
if (c instanceof KeyValue) {
return (KeyValue) c;
}
- return KeyValueUtil.copyToNewKeyValue(c);
+
+ // TODO how to handle this ? The main issue is that Cell in HBase 3 no longer has sequenceId.
+ // Normally, these off-heap to on-heap copy methods are only to be called on the server side,
+ // where we're not even supposed to see non-ExtendedCells.
+ // This currently errors out if it encounters non-ExtendedCells, but we could also just create a
+ // KeyValue with a 0 sequenceId.
+ return KeyValueUtil.copyToNewKeyValue((ExtendedCell) c);
}
/**
@@ -261,7 +274,7 @@ public static List maybeCopyCellList(List cells) {
Cell c = cellsIt.next();
// FIXME this does not catch all off-heap cells
if (c instanceof ByteBufferExtendedCell) {
- cellsIt.set(KeyValueUtil.copyToNewKeyValue(c));
+ cellsIt.set(KeyValueUtil.copyToNewKeyValue((ByteBufferExtendedCell) c));
}
}
return cells;
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java
index d668af16b42..e60d183a33c 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -1236,8 +1236,7 @@ public static boolean isStrictTTL(Scan scan) {
}
public static boolean isEmptyColumn(Cell cell, byte[] emptyCF, byte[] emptyCQ) {
- return CellUtil.matchingFamily(cell, emptyCF, 0, emptyCF.length)
- && CellUtil.matchingQualifier(cell, emptyCQ, 0, emptyCQ.length);
+ return CellUtil.matchingFamily(cell, emptyCF) && CellUtil.matchingQualifier(cell, emptyCQ);
}
public static long getMaxTimestamp(List cellList) {
@@ -1614,11 +1613,11 @@ public static boolean isDummy(Result result) {
return isDummy(cell);
}
- public static boolean isDummy(List| result) {
+ public static boolean isDummy(List result) {
if (result.size() != 1) {
return false;
}
- Cell cell = result.get(0);
+ Cell cell = (Cell) result.get(0);
return isDummy(cell);
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 39437aa25f0..5bdea09b6f0 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -371,8 +371,15 @@ private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBucke
} finally {
try {
admin.close();
- } catch (IOException e) {
- LOGGER.warn("Exception while closing admin during pre-split", e);
+ } catch (Throwable t) {
+ // Workaround for HBase 2/3 API changes
+ if (t instanceof IOException) {
+ LOGGER.warn("Exception while closing admin during pre-split", t);
+ } else if (t instanceof RuntimeException) {
+ throw (RuntimeException) t;
+ } else if (t instanceof Error) {
+ throw (Error) t;
+ }
}
}
}
@@ -2343,8 +2350,15 @@ private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn,
if (admin != null) {
admin.close();
}
- } catch (IOException e) {
- LOGGER.warn("Unable to close admin after upgrade:", e);
+ } catch (Throwable t) {
+ // Workaround for HBase 2/3 API changes
+ if (t instanceof IOException) {
+ LOGGER.warn("Unable to close admin after upgrade:", t);
+ } else if (t instanceof RuntimeException) {
+ throw (RuntimeException) t;
+ } else if (t instanceof Error) {
+ throw (Error) t;
+ }
}
}
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java
index ff80ce381e8..a5773b8628f 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.types.PLong;
@@ -33,7 +34,7 @@ private ViewIndexIdRetrieveUtil() {
}
- public static Cell buildNewCell(Cell viewIndexIdCell, byte[] newVal) {
+ public static Cell buildNewCell(ExtendedCell viewIndexIdCell, byte[] newVal) {
KeyValue keyValue = new KeyValue(viewIndexIdCell.getRowArray(), viewIndexIdCell.getRowOffset(),
viewIndexIdCell.getRowLength(), viewIndexIdCell.getFamilyArray(),
viewIndexIdCell.getFamilyOffset(), viewIndexIdCell.getFamilyLength(),
@@ -44,7 +45,8 @@ public static Cell buildNewCell(Cell viewIndexIdCell, byte[] newVal) {
return keyValue;
}
- public static Cell getRetrievedViewIndexIdCell(Cell viewIndexIdCell, boolean isShortToLong) {
+ public static Cell getRetrievedViewIndexIdCell(ExtendedCell viewIndexIdCell,
+ boolean isShortToLong) {
ImmutableBytesWritable columnValue =
new ImmutableBytesWritable(CellUtil.cloneValue(viewIndexIdCell));
diff --git a/phoenix-core-server/pom.xml b/phoenix-core-server/pom.xml
index 45611f77507..5f44d5f7355 100644
--- a/phoenix-core-server/pom.xml
+++ b/phoenix-core-server/pom.xml
@@ -87,10 +87,6 @@
org.apache.hbase
hbase-client
| | | | | | | | | | | |
-
- org.apache.hbase
- hbase-protocol
-
org.apache.hbase
hbase-protocol-shaded
@@ -129,10 +125,6 @@
org.apache.htrace
htrace-core
-
- com.google.protobuf
- protobuf-java
-
com.fasterxml.jackson.core
jackson-annotations
@@ -177,10 +169,6 @@
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
maven-dependency-plugin
@@ -188,5 +176,180 @@
+ ${actualSourceDirectory}
+
+
+ hbase-2.x
+
+
+ hbase.profile
+
+ !3.0
+
+
+
+ src/main/java
+
+
+
+ org.apache.hbase
+ hbase-protocol
+
+
+ com.google.protobuf
+ protobuf-java
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-antlr-source
+
+ add-source
+
+ generate-sources
+
+
+ ${antlr-output.dir}
+ ${antlr-input.dir}
+
+
+
+
+
+
+
+
+
+ hbase-3.x
+
+
+ hbase.profile
+ 3.0
+
+
+
+ target/generated-sources/replaced/main/java
+
+ 4.31.1
+
+
+
+
+ org.apache.hbase
+ hbase-balancer
+
+
+
+
+
+ com.google.code.maven-replacer-plugin
+ replacer
+
+
+ replace-generated-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/target/generated-sources/protobuf
+
+ **/*.java
+
+ true
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ replace-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+ **/*.java
+
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ copy-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-antlr-source
+
+ add-source
+
+ generate-sources
+
+
+ ${antlr-output.dir}
+ ${antlr-input.dir}
+
+
+
+
+
+
+
+
+
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index 228a606c27b..b60a8e9a68b 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -74,7 +74,7 @@ public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction
return scheduler;
}
- @Override
+ // Only exists in HBase 2.x API, removed from HBase 3
public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
return create(configuration, priorityFunction, null);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
index 9ff5283632d..79b1c493adc 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
@@ -80,12 +80,12 @@ public DataTableLocalIndexRegionScanner(RegionScanner scanner, Region region,
}
@Override
- public boolean next(List outResult, ScannerContext scannerContext) throws IOException {
+ public boolean next(List outResult, ScannerContext scannerContext) throws IOException {
return next(outResult);
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
List dataTableResults = new ArrayList();
boolean next = super.next(dataTableResults);
addMutations(dataTableResults);
@@ -99,18 +99,17 @@ public boolean next(List results) throws IOException {
return next;
}
- private void addMutations(List dataTableResults) throws IOException {
+ private void addMutations(List dataTableResults) throws IOException {
if (!dataTableResults.isEmpty()) {
result.setKeyValues(dataTableResults);
for (IndexMaintainer maintainer : indexMaintainers) {
result.getKey(ptr);
ValueGetter valueGetter = maintainer
.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), dataTableResults);
- List list =
- maintainer
- .buildUpdateMutation(kvBuilder, valueGetter, ptr,
- dataTableResults.get(0).getTimestamp(), startKey, endKey, false)
- .getFamilyCellMap().get(localIndexFamily);
+ List list = maintainer
+ .buildUpdateMutation(kvBuilder, valueGetter, ptr,
+ ((Cell) dataTableResults.get(0)).getTimestamp(), startKey, endKey, false)
+ .getFamilyCellMap().get(localIndexFamily);
Put put = null;
Delete del = null;
for (Cell cell : list) {
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 338991e8456..8a6ace68921 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -73,13 +73,13 @@ public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheCo
final Map indexMaintainers,
final byte[][] viewConstants, final RegionInfo regionInfo, byte[] regionStartKeyInHFile,
byte[] splitKey, boolean primaryReplicaStoreFile, AtomicInteger refCount,
- RegionInfo currentRegion) throws IOException {
+ RegionInfo currentRegion, StoreFileReader originalReader) throws IOException {
super(fs, cacheConf, conf,
new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
ReaderType.STREAM),
new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
ReaderType.STREAM), conf),
- p);
+ p, originalReader);
getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader());
this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
// Is it top or bottom half?
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 6d657dfbe0f..d6db4253785 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -80,11 +80,12 @@ public Optional getRegionObserver() {
}
@Override
- public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx,
- FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
- Reference r, StoreFileReader reader) throws IOException {
- TableName tableName = ctx.getEnvironment().getRegion().getTableDescriptor().getTableName();
- Region region = ctx.getEnvironment().getRegion();
+ public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p,
+ FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
+ StoreFileReader reader) throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) ctx.getEnvironment();
+ TableName tableName = env.getRegion().getTableDescriptor().getTableName();
+ Region region = env.getRegion();
RegionInfo childRegion = region.getRegionInfo();
byte[] splitKey = null;
if (reader == null && r != null) {
@@ -119,8 +120,8 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext mergeRegions = CompatUtil
- .getMergeRegions(ctx.getEnvironment().getConnection(), region.getRegionInfo());
+ List mergeRegions =
+ CompatUtil.getMergeRegions(env.getConnection(), region.getRegionInfo());
if (mergeRegions == null || mergeRegions.isEmpty()) {
return reader;
}
@@ -154,14 +155,13 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext indexes = dataTable.getIndexes();
Map indexMaintainers =
new HashMap();
@@ -180,7 +180,7 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext c, Store store,
- InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
+ public InternalScanner preCompact(ObserverContext c, Store store, InternalScanner s,
+ ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request)
+ throws IOException {
if (!isLocalIndexStore(store)) {
return s;
}
if (!store.hasReferences()) {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
InternalScanner repairScanner = null;
- if (
- request.isMajor()
- && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))
- ) {
+ if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(e, store))) {
LOGGER.info("we have found inconsistent data for local index for region:"
- + c.getEnvironment().getRegion().getRegionInfo());
+ + e.getRegion().getRegionInfo());
if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
- LOGGER.info("Starting automatic repair of local Index for region:"
- + c.getEnvironment().getRegion().getRegionInfo());
- repairScanner = getRepairScanner(c.getEnvironment(), store);
+ LOGGER.info(
+ "Starting automatic repair of local Index for region:" + e.getRegion().getRegionInfo());
+ repairScanner = getRepairScanner(e, store);
}
}
if (repairScanner != null) {
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
index 786da2c716c..9882101b1aa 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
@@ -24,6 +24,7 @@
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -51,10 +52,10 @@ public LocalIndexStoreFileScanner(IndexHalfStoreFileReader reader, boolean cache
}
@Override
- public Cell next() throws IOException {
- Cell next = super.next();
+ public ExtendedCell next() throws IOException {
+ ExtendedCell next = (ExtendedCell) super.next();
while (next != null && !isSatisfiedMidKeyCondition(next)) {
- next = super.next();
+ next = (ExtendedCell) super.next();
}
while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) {
super.next();
@@ -66,15 +67,15 @@ public Cell next() throws IOException {
}
@Override
- public Cell peek() {
- Cell peek = super.peek();
+ public ExtendedCell peek() {
+ ExtendedCell peek = (ExtendedCell) super.peek();
if (peek != null && (reader.isTop() || changeBottomKeys)) {
peek = getChangedKey(peek, !reader.isTop() && changeBottomKeys);
}
return peek;
}
- private Cell getChangedKey(Cell next, boolean changeBottomKeys) {
+ private ExtendedCell getChangedKey(ExtendedCell next, boolean changeBottomKeys) {
// If it is a top store file change the StartKey with SplitKey in Key
// and produce the new value corresponding to the change in key
byte[] changedKey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys);
@@ -90,8 +91,9 @@ private Cell getChangedKey(Cell next, boolean changeBottomKeys) {
* Enforce seek all the time for local index store file scanner otherwise some times hbase might
* return fake kvs not in physical files.
*/
- @Override
- public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
+ // HBase 3 API
+ public boolean requestSeek(ExtendedCell kv, boolean forward, boolean useBloom)
+ throws IOException {
boolean requestSeek = super.requestSeek(kv, forward, useBloom);
if (requestSeek) {
Cell peek = super.peek();
@@ -105,30 +107,46 @@ public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IO
return requestSeek;
}
- @Override
- public boolean seek(Cell key) throws IOException {
+ // HBase 2 API
+ public boolean requestSeek(Cell kvIn, boolean forward, boolean useBloom) throws IOException {
+ return requestSeek((ExtendedCell) kvIn, forward, useBloom);
+ }
+
+ // HBase 3 API
+ public boolean seek(ExtendedCell key) throws IOException {
return seekOrReseek(key, true);
}
- @Override
- public boolean reseek(Cell key) throws IOException {
+ // HBase 2 API
+ // TODO push these to the compatibility module ?
+ public boolean seek(Cell key) throws IOException {
+ return seek((ExtendedCell) key);
+ }
+
+ // HBase 3 API
+ public boolean reseek(ExtendedCell key) throws IOException {
return seekOrReseek(key, false);
}
- @Override
- public boolean seekToPreviousRow(Cell key) throws IOException {
+ // HBase 2 API
+ public boolean reseek(Cell key) throws IOException {
+ return reseek((ExtendedCell) key);
+ }
+
+ // HBase 3 API
+ public boolean seekToPreviousRow(ExtendedCell key) throws IOException {
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
if (reader.isTop()) {
- Optional firstKey = reader.getFirstKey();
+ Optional firstKey = reader.getFirstKey();
// This will be null when the file is empty in which we can not seekBefore to
// any key
if (!firstKey.isPresent()) {
return false;
}
- if (this.comparator.compare(kv, firstKey.get(), true) <= 0) {
+ if (this.comparator.compare(kv, (ExtendedCell) firstKey.get(), true) <= 0) {
return super.seekToPreviousRow(key);
}
- Cell replacedKey = getKeyPresentInHFiles(kv);
+ ExtendedCell replacedKey = getKeyPresentInHFiles(kv);
boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey);
while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -153,6 +171,11 @@ public boolean seekToPreviousRow(Cell key) throws IOException {
return seekToPreviousRow;
}
+ // HBase 2 API
+ public boolean seekToPreviousRow(Cell key) throws IOException {
+ return seekToPreviousRow((ExtendedCell) key);
+ }
+
@Override
public boolean seekToLastRow() throws IOException {
boolean seekToLastRow = super.seekToLastRow();
@@ -215,8 +238,8 @@ private KeyValue getKeyPresentInHFiles(Cell keyValue) {
/**
* @param isSeek pass true for seek, false for reseek.
*/
- public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException {
- Cell keyToSeek = cell;
+ public boolean seekOrReseek(ExtendedCell cell, boolean isSeek) throws IOException {
+ ExtendedCell keyToSeek = cell;
KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey());
if (reader.isTop()) {
if (this.comparator.compare(cell, splitKeyValue, true) < 0) {
@@ -242,7 +265,7 @@ public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException {
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
- private boolean seekOrReseekToProperKey(boolean isSeek, Cell kv) throws IOException {
+ private boolean seekOrReseekToProperKey(boolean isSeek, ExtendedCell kv) throws IOException {
boolean seekOrReseek = isSeek ? super.seek(kv) : super.reseek(kv);
while (seekOrReseek && super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) {
super.next();
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
index fa26a65923c..f73429a0143 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java
@@ -22,7 +22,7 @@
import java.io.InputStream;
import java.io.PushbackInputStream;
import javax.annotation.Nonnull;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.codec.Codec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -37,7 +37,7 @@ public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder {
protected static final Logger LOGGER = LoggerFactory.getLogger(BinaryCompatibleBaseDecoder.class);
protected final InputStream in;
- private Cell current = null;
+ private ExtendedCell current = null;
protected static class PBIS extends PushbackInputStream {
public PBIS(InputStream in, int size) {
@@ -99,10 +99,10 @@ protected InputStream getInputStream() {
* thrown if EOF is reached prematurely. Does not return null.
*/
@Nonnull
- protected abstract Cell parseCell() throws IOException;
+ protected abstract ExtendedCell parseCell() throws IOException;
@Override
- public Cell current() {
+ public ExtendedCell current() {
return this.current;
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
index a0f650b1111..857611aab6c 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
@@ -27,6 +27,7 @@
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.BaseDecoder;
import org.apache.hadoop.hbase.codec.BaseEncoder;
@@ -165,7 +166,7 @@ public CompressedIndexKeyValueDecoder(InputStream is, Decoder compressedDecoder)
}
@Override
- protected Cell parseCell() throws IOException {
+ protected ExtendedCell parseCell() throws IOException {
// reader the marker
int marker = this.in.read();
if (marker < 0) {
@@ -178,7 +179,7 @@ protected Cell parseCell() throws IOException {
if (!this.decoder.advance()) {
throw new IOException("Could not read next key-value from generic KeyValue Decoder!");
}
- return this.decoder.current();
+ return (ExtendedCell) this.decoder.current();
}
// its an indexedKeyValue, so parse it out specially
@@ -209,14 +210,19 @@ public void flush() throws IOException {
super.flush();
}
- @Override
- public void write(Cell cell) throws IOException {
+ // This is the HBase 3.0 signature
+ public void write(ExtendedCell cell) throws IOException {
// make sure we are open
checkFlushed();
// use the standard encoding mechanism
KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
}
+
+ // This is the HBase 2.x signature
+ public void write(Cell cell) throws IOException {
+ write((ExtendedCell) cell);
+ }
}
/**
@@ -238,8 +244,7 @@ public void flush() throws IOException {
super.flush();
}
- @Override
- public void write(Cell cell) throws IOException {
+ public void write(ExtendedCell cell) throws IOException {
// make sure we are open
checkFlushed();
@@ -257,6 +262,10 @@ public void write(Cell cell) throws IOException {
KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
}
}
+
+ public void write(Cell cell) throws IOException {
+ write((ExtendedCell) cell);
+ }
}
private static abstract class BinaryCompatiblePhoenixBaseDecoder
@@ -315,7 +324,7 @@ public BinaryCompatibleCompressedIndexKeyValueDecoder(InputStream is,
}
@Override
- protected Cell parseCell() throws IOException {
+ protected ExtendedCell parseCell() throws IOException {
// reader the marker
int marker = this.in.read();
if (marker < 0) {
@@ -328,7 +337,7 @@ protected Cell parseCell() throws IOException {
if (!this.decoder.advance()) {
throw new IOException("Could not read next key-value from generic KeyValue Decoder!");
}
- return this.decoder.current();
+ return (ExtendedCell) this.decoder.current();
}
// its an indexedKeyValue, so parse it out specially
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index ed703905976..f731b5ac151 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -35,7 +35,6 @@
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -368,12 +367,12 @@ public void close() throws IOException {
}
}
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
if (!cacheIter.hasNext()) {
return false;
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
index fd829c0332b..1369ae5c2f0 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
@@ -36,10 +35,10 @@ public boolean isFilterDone() {
}
@Override
- public abstract boolean next(List results) throws IOException;
+ public abstract boolean next(List results) throws IOException;
@Override
- public abstract boolean next(List result, ScannerContext scannerContext) throws IOException;
+ public abstract boolean next(List result, ScannerContext scannerContext) throws IOException;
@Override
public boolean reseek(byte[] row) throws IOException {
@@ -47,12 +46,12 @@ public boolean reseek(byte[] row) throws IOException {
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
return next(result);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
return next(result, scannerContext);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 723eaee0391..ba0c161d100 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells;
@@ -149,8 +148,7 @@ protected boolean skipRegionBoundaryCheck(Scan scan) {
}
@Override
- public void preScannerOpen(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan)
+ public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan)
throws IOException {
byte[] txnScn = scan.getAttribute(BaseScannerRegionObserverConstants.TX_SCN);
if (txnScn != null) {
@@ -169,7 +167,8 @@ public void preScannerOpen(
// results otherwise while in other cases, it may just mean out client-side data
// on region boundaries is out of date and can safely be ignored.
if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) {
- throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion());
+ throwIfScanOutOfRegion(scan,
+ ((RegionCoprocessorEnvironment) c.getEnvironment()).getRegion());
}
// Muck with the start/stop row of the scan and set as reversed at the
// last possible moment. You need to swap the start/stop and make the
@@ -249,22 +248,22 @@ public void close() throws IOException {
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return nextInternal(result, scannerContext, false);
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
overrideDelegate();
return super.next(result);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
return nextInternal(result, scannerContext, true);
}
- private boolean nextInternal(List result, ScannerContext scannerContext, boolean isRaw)
+ private boolean nextInternal(List result, ScannerContext scannerContext, boolean isRaw)
throws IOException {
overrideDelegate();
if (scannerContext instanceof PhoenixScannerContext) {
@@ -289,7 +288,7 @@ private boolean nextInternal(List result, ScannerContext scannerContext, b
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
overrideDelegate();
return super.nextRaw(result);
}
@@ -310,9 +309,11 @@ public RegionScanner getNewRegionScanner(Scan scan) throws IOException {
* IOException is thrown, to prevent the coprocessor from becoming blacklisted.
*/
@Override
- public final RegionScanner postScannerOpen(final ObserverContext c,
- final Scan scan, final RegionScanner s) throws IOException {
+ public final RegionScanner postScannerOpen(final ObserverContext c, final Scan scan,
+ final RegionScanner s) throws IOException {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
+
if (!isRegionObserverFor(scan)) {
return s;
}
@@ -327,11 +328,10 @@ public final RegionScanner postScannerOpen(final ObserverContext c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
+ public void preCompactScannerOpen(ObserverContext c, Store store, ScanType scanType,
+ ScanOptions options, CompactionLifeCycleTracker tracker, CompactionRequest request)
+ throws IOException {
Configuration conf = c.getEnvironment().getConfiguration();
if (isPhoenixCompactionEnabled(conf)) {
setScanOptionsForFlushesAndCompactions(options);
@@ -401,8 +400,8 @@ public void preCompactScannerOpen(ObserverContext
}
@Override
- public void preFlushScannerOpen(ObserverContext c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
+ public void preFlushScannerOpen(ObserverContext c, Store store, ScanOptions options,
+ FlushLifeCycleTracker tracker) throws IOException {
Configuration conf = c.getEnvironment().getConfiguration();
if (isPhoenixCompactionEnabled(conf)) {
@@ -416,9 +415,8 @@ public void preFlushScannerOpen(ObserverContext c,
}
@Override
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext c, Store store, ScanOptions options)
- throws IOException {
+ public void preMemStoreCompactionCompactScannerOpen(ObserverContext c, Store store,
+ ScanOptions options) throws IOException {
Configuration conf = c.getEnvironment().getConfiguration();
if (isPhoenixCompactionEnabled(conf)) {
setScanOptionsForFlushesAndCompactions(options);
@@ -444,8 +442,8 @@ public void preMemStoreCompactionCompactScannerOpen(
}
@Override
- public void preStoreScannerOpen(ObserverContext c, Store store,
- ScanOptions options) throws IOException {
+ public void preStoreScannerOpen(ObserverContext c, Store store, ScanOptions options)
+ throws IOException {
Configuration conf = c.getEnvironment().getConfiguration();
if (isPhoenixCompactionEnabled(conf)) {
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java
index f12dc77f724..5125546af6c 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java
@@ -434,7 +434,7 @@ private void postProcessForConditionalTTL(List result) {
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
boolean hasMore = storeScanner.next(result);
inputCellCount += result.size();
if (!result.isEmpty()) {
@@ -453,7 +453,7 @@ public boolean next(List result) throws IOException {
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 7edee4f4df9..e7bfcfe3b7a 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.regionserver.OnlineRegions;
import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.phoenix.compat.hbase.CompatDelegateRegionCoprocessorEnvironment;
import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
import org.apache.phoenix.util.ServerUtil.ConnectionType;
@@ -37,15 +38,15 @@
* clone the configuration provided by the HBase coprocessor environment before modifying it. So
* this class comes in handy where we have to return our custom config.
*/
-public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment {
+public class DelegateRegionCoprocessorEnvironment extends CompatDelegateRegionCoprocessorEnvironment
+ implements RegionCoprocessorEnvironment {
private final Configuration config;
- private RegionCoprocessorEnvironment delegate;
private ConnectionType connectionType;
public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate,
ConnectionType connectionType) {
- this.delegate = delegate;
+ super(delegate);
this.connectionType = connectionType;
this.config =
ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration());
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index d4d26f73803..7263940146a 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -29,11 +29,9 @@
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -42,9 +40,7 @@
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
public class DelegateRegionObserver implements RegionObserver {
@@ -55,262 +51,231 @@ public DelegateRegionObserver(RegionObserver delegate) {
}
@Override
- public void preOpen(ObserverContext c) throws IOException {
+ public void preOpen(ObserverContext c) throws IOException {
delegate.preOpen(c);
}
@Override
- public void postOpen(ObserverContext c) {
+ public void postOpen(ObserverContext c) {
delegate.postOpen(c);
}
@Override
- public void preFlush(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
+ public void preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c,
org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
delegate.preFlush(c, tracker);
;
}
@Override
- public InternalScanner preFlush(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
+ public InternalScanner preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c,
Store store, InternalScanner scanner,
org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
return delegate.preFlush(c, store, scanner, tracker);
}
@Override
- public void postFlush(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
+ public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c,
org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
delegate.postFlush(c, tracker);
}
@Override
- public void postFlush(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
- Store store, StoreFile resultFile,
- org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
+ public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Store store,
+ StoreFile resultFile, org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker)
+ throws IOException {
delegate.postFlush(c, store, resultFile, tracker);
}
@Override
- public void preClose(ObserverContext c, boolean abortRequested)
- throws IOException {
+ public void preClose(ObserverContext c, boolean abortRequested) throws IOException {
delegate.preClose(c, abortRequested);
}
@Override
- public void postClose(ObserverContext c, boolean abortRequested) {
+ public void postClose(ObserverContext c, boolean abortRequested) {
delegate.postClose(c, abortRequested);
}
@Override
- public void preGetOp(ObserverContext c, Get get, List result)
- throws IOException {
+ public void preGetOp(ObserverContext c, Get get, List result) throws IOException {
delegate.preGetOp(c, get, result);
}
@Override
- public void postGetOp(ObserverContext c, Get get, List result)
- throws IOException {
+ public void postGetOp(ObserverContext c, Get get, List result) throws IOException {
delegate.postGetOp(c, get, result);
}
@Override
- public boolean preExists(ObserverContext c, Get get, boolean exists)
- throws IOException {
+ public boolean preExists(ObserverContext c, Get get, boolean exists) throws IOException {
return delegate.preExists(c, get, exists);
}
@Override
- public boolean postExists(ObserverContext c, Get get,
- boolean exists) throws IOException {
+ public boolean postExists(ObserverContext c, Get get, boolean exists) throws IOException {
return delegate.postExists(c, get, exists);
}
@Override
- public void prePut(ObserverContext c, Put put, WALEdit edit,
- Durability durability) throws IOException {
+ public void prePut(ObserverContext c, Put put, WALEdit edit, Durability durability)
+ throws IOException {
delegate.prePut(c, put, edit, durability);
}
@Override
- public void postPut(ObserverContext c, Put put, WALEdit edit,
- Durability durability) throws IOException {
+ public void postPut(ObserverContext c, Put put, WALEdit edit, Durability durability)
+ throws IOException {
delegate.postPut(c, put, edit, durability);
}
@Override
- public void preDelete(ObserverContext c, Delete delete,
- WALEdit edit, Durability durability) throws IOException {
+ public void preDelete(ObserverContext c, Delete delete, WALEdit edit, Durability durability)
+ throws IOException {
delegate.preDelete(c, delete, edit, durability);
}
@Override
- public void prePrepareTimeStampForDeleteVersion(ObserverContext c,
- Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException {
+ public void prePrepareTimeStampForDeleteVersion(ObserverContext c, Mutation mutation, Cell cell,
+ byte[] byteNow, Get get) throws IOException {
delegate.prePrepareTimeStampForDeleteVersion(c, mutation, cell, byteNow, get);
}
@Override
- public void postDelete(ObserverContext c, Delete delete,
- WALEdit edit, Durability durability) throws IOException {
+ public void postDelete(ObserverContext c, Delete delete, WALEdit edit, Durability durability)
+ throws IOException {
delegate.postDelete(c, delete, edit, durability);
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
delegate.preBatchMutate(c, miniBatchOp);
}
@Override
- public void postBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void postBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
delegate.postBatchMutate(c, miniBatchOp);
}
@Override
- public void postStartRegionOperation(ObserverContext ctx,
- Operation operation) throws IOException {
+ public void postStartRegionOperation(ObserverContext ctx, Operation operation)
+ throws IOException {
delegate.postStartRegionOperation(ctx, operation);
}
@Override
- public void postCloseRegionOperation(ObserverContext ctx,
- Operation operation) throws IOException {
+ public void postCloseRegionOperation(ObserverContext ctx, Operation operation)
+ throws IOException {
delegate.postCloseRegionOperation(ctx, operation);
}
@Override
- public void postBatchMutateIndispensably(ObserverContext ctx,
- MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {
+ public void postBatchMutateIndispensably(ObserverContext ctx,
+ MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {
delegate.postBatchMutateIndispensably(ctx, miniBatchOp, success);
}
@Override
- public Result preAppend(ObserverContext c, Append append)
- throws IOException {
+ public Result preAppend(ObserverContext c, Append append) throws IOException {
return delegate.preAppend(c, append);
}
@Override
- public Result preAppendAfterRowLock(ObserverContext c,
- Append append) throws IOException {
+ public Result preAppendAfterRowLock(ObserverContext c, Append append) throws IOException {
return delegate.preAppendAfterRowLock(c, append);
}
@Override
- public Result postAppend(ObserverContext c, Append append,
- Result result) throws IOException {
+ public Result postAppend(ObserverContext c, Append append, Result result) throws IOException {
return delegate.postAppend(c, append, result);
}
@Override
- public Result preIncrement(ObserverContext c, Increment increment)
- throws IOException {
+ public Result preIncrement(ObserverContext c, Increment increment) throws IOException {
return delegate.preIncrement(c, increment);
}
@Override
- public Result preIncrementAfterRowLock(ObserverContext c,
- Increment increment) throws IOException {
+ public Result preIncrementAfterRowLock(ObserverContext c, Increment increment)
+ throws IOException {
return delegate.preIncrementAfterRowLock(c, increment);
}
@Override
- public Result postIncrement(ObserverContext c, Increment increment,
- Result result) throws IOException {
+ public Result postIncrement(ObserverContext c, Increment increment, Result result)
+ throws IOException {
return delegate.postIncrement(c, increment, result);
}
@Override
- public void preScannerOpen(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan)
+ public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan)
throws IOException {
delegate.preScannerOpen(c, scan);
}
@Override
- public RegionScanner postScannerOpen(ObserverContext c, Scan scan,
- RegionScanner s) throws IOException {
+ public RegionScanner postScannerOpen(ObserverContext c, Scan scan, RegionScanner s)
+ throws IOException {
return delegate.postScannerOpen(c, scan, s);
}
@Override
- public boolean preScannerNext(ObserverContext c, InternalScanner s,
- List result, int limit, boolean hasNext) throws IOException {
+ public boolean preScannerNext(ObserverContext c, InternalScanner s, List result, int limit,
+ boolean hasNext) throws IOException {
return delegate.preScannerNext(c, s, result, limit, hasNext);
}
@Override
- public boolean postScannerNext(ObserverContext c, InternalScanner s,
- List result, int limit, boolean hasNext) throws IOException {
+ public boolean postScannerNext(ObserverContext c, InternalScanner s, List result, int limit,
+ boolean hasNext) throws IOException {
return delegate.postScannerNext(c, s, result, limit, hasNext);
}
@Override
- public void preScannerClose(ObserverContext c, InternalScanner s)
- throws IOException {
+ public void preScannerClose(ObserverContext c, InternalScanner s) throws IOException {
delegate.preScannerClose(c, s);
}
@Override
- public void postScannerClose(ObserverContext c, InternalScanner s)
- throws IOException {
+ public void postScannerClose(ObserverContext c, InternalScanner s) throws IOException {
delegate.postScannerClose(c, s);
}
@Override
- public void preWALRestore(ObserverContext extends RegionCoprocessorEnvironment> ctx,
- RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
- delegate.preWALRestore(ctx, info, logKey, logEdit);
- }
-
- @Override
- public void postWALRestore(ObserverContext extends RegionCoprocessorEnvironment> ctx,
- RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
- delegate.postWALRestore(ctx, info, logKey, logEdit);
- }
-
- @Override
- public void preBulkLoadHFile(ObserverContext ctx,
- List> familyPaths) throws IOException {
+ public void preBulkLoadHFile(ObserverContext ctx, List familyPaths) throws IOException {
delegate.preBulkLoadHFile(ctx, familyPaths);
}
@Override
- public Cell postMutationBeforeWAL(ObserverContext ctx,
- MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException {
+ public Cell postMutationBeforeWAL(ObserverContext ctx, MutationType opType, Mutation mutation,
+ Cell oldCell, Cell newCell) throws IOException {
return delegate.postMutationBeforeWAL(ctx, opType, mutation, oldCell, newCell);
}
@Override
- public DeleteTracker postInstantiateDeleteTracker(
- ObserverContext ctx, DeleteTracker delTracker)
+ public DeleteTracker postInstantiateDeleteTracker(ObserverContext ctx, DeleteTracker delTracker)
throws IOException {
return delegate.postInstantiateDeleteTracker(ctx, delTracker);
}
@Override
- public void preCommitStoreFile(ObserverContext ctx, byte[] family,
- List> pairs) throws IOException {
+ public void preCommitStoreFile(ObserverContext ctx, byte[] family, List pairs)
+ throws IOException {
delegate.preCommitStoreFile(ctx, family, pairs);
}
@Override
- public void postCommitStoreFile(ObserverContext ctx, byte[] family,
- Path srcPath, Path dstPath) throws IOException {
+ public void postCommitStoreFile(ObserverContext ctx, byte[] family, Path srcPath, Path dstPath)
+ throws IOException {
delegate.postCommitStoreFile(ctx, family, srcPath, dstPath);
}
@Override
- public void postBulkLoadHFile(ObserverContext ctx,
- List> stagingFamilyPaths, Map> finalPaths)
+ public void postBulkLoadHFile(ObserverContext ctx, List stagingFamilyPaths, Map finalPaths)
throws IOException {
delegate.postBulkLoadHFile(ctx, stagingFamilyPaths, finalPaths);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index 4563f102fd7..6c28da957e9 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
@@ -60,22 +59,22 @@ public long getMaxResultSize() {
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result, false, scannerContext);
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
return next(result, false, null);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
return next(result, true, scannerContext);
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
return next(result, true, null);
}
@@ -97,8 +96,7 @@ public RegionScanner getNewRegionScanner(Scan scan) throws IOException {
}
}
- private boolean next(List result, boolean raw, ScannerContext scannerContext)
- throws IOException {
+ private boolean next(List result, boolean raw, ScannerContext scannerContext) throws IOException {
if (scannerContext != null) {
return raw ? delegate.nextRaw(result, scannerContext) : delegate.next(result, scannerContext);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index b7163c0a3f8..b50493fd747 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -35,6 +35,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -334,7 +335,7 @@ public RegionScanner getScanner(final RegionScanner s) {
return new BaseRegionScanner(s) {
private int index = 0;
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
@@ -348,7 +349,7 @@ public void close() throws IOException {
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
if (index >= aggResults.size()) {
return false;
}
@@ -468,49 +469,32 @@ private UnorderedGroupByRegionScanner(final ObserverContext results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
@Override
- public boolean next(List resultsToReturn) throws IOException {
+ public boolean next(List resultsToReturn) throws IOException {
return next(resultsToReturn, null);
}
@Override
- public boolean next(List resultsToReturn, ScannerContext scannerContext)
- throws IOException {
+ public boolean next(List resultsToReturn, ScannerContext scannerContext) throws IOException {
if (firstScan && actualScanStartRowKey != null) {
if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) {
if (hasRegionMoved()) {
LOGGER.info(
"Region has moved.. Actual scan start rowkey {} is not same "
- + "as current scan start rowkey {}",
+ + "as current scan start rowkey {}",
Bytes.toStringBinary(actualScanStartRowKey), Bytes.toStringBinary(scanStartRowKey));
- // If region has moved in the middle of the scan operation, after resetting
- // the scanner, hbase client uses (latest received rowkey + \x00) as new
- // start rowkey for resuming the scan operation on the new scanner.
- if (
- Bytes.compareTo(ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE),
- scanStartRowKey) == 0
- ) {
- scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY,
- actualScanStartRowKey);
- scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE,
- Bytes.toBytes(actualScanIncludeStartRowKey));
- } else {
- // This happens when the server side scanner has already sent some
- // rows back to the client and region has moved, so now we need to
- // use skipValidRowsSent flag and also reset the scanner
- // at paging region scanner level to re-read the previously sent
- // values in order to re-compute the aggregation and then return
- // only the next rowkey that was not yet sent back to the client.
- skipValidRowsSent = true;
- scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY,
- actualScanStartRowKey);
- scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE,
- Bytes.toBytes(actualScanIncludeStartRowKey));
- }
+ // The region has moved during scan, so the HBase client creates a new scan.
+ // We need to restart the scan, and optionally skip any rows already received by the
+ // client
+ skipValidRowsSent = true;
+ scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY,
+ actualScanStartRowKey);
+ scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE,
+ Bytes.toBytes(actualScanIncludeStartRowKey));
}
}
}
@@ -522,80 +506,149 @@ public boolean next(List resultsToReturn, ScannerContext scannerContext)
return true;
}
if (skipValidRowsSent) {
- while (true) {
- if (!moreRows) {
- skipValidRowsSent = false;
- if (resultsToReturn.size() > 0) {
- lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0));
- }
- return moreRows;
- }
- Cell firstCell = resultsToReturn.get(0);
- byte[] resultRowKey = new byte[firstCell.getRowLength()];
- System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), resultRowKey, 0,
- resultRowKey.length);
- // In case of regular scans, if the region moves and scanner is reset,
- // hbase client checks the last returned row by the server, gets the
- // rowkey and appends "\x00" byte, before resuming the scan. With this,
- // scan includeStartRowKey is set to true.
- // However, same is not the case with reverse scans. For the reverse scan,
- // hbase client checks the last returned row by the server, gets the
- // rowkey and treats it as startRowKey for resuming the scan. With this,
- // scan includeStartRowKey is set to false.
- // Hence, we need to cover both cases here.
- if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) {
- // This can be true for reverse scan case.
- skipValidRowsSent = false;
- if (includeStartRowKey) {
- if (resultsToReturn.size() > 0) {
- lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0));
- }
- return moreRows;
- }
- // If includeStartRowKey is false and the current rowkey is matching
- // with scanStartRowKey, return the next row result.
- resultsToReturn.clear();
- moreRows = nextInternal(resultsToReturn, scannerContext);
- if (ScanUtil.isDummy(resultsToReturn)) {
- return true;
- }
- if (resultsToReturn.size() > 0) {
- lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0));
- }
- return moreRows;
- } else if (
- Bytes.compareTo(ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), scanStartRowKey) == 0
- ) {
- // This can be true for regular scan case.
- skipValidRowsSent = false;
- if (includeStartRowKey) {
- // If includeStartRowKey is true and the (current rowkey + "\0xx") is
- // matching with scanStartRowKey, return the next row result.
- resultsToReturn.clear();
- moreRows = nextInternal(resultsToReturn, scannerContext);
- if (ScanUtil.isDummy(resultsToReturn)) {
- return true;
- }
- if (resultsToReturn.size() > 0) {
- lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0));
- }
- return moreRows;
+ Iterator resultIt = resultsToReturn.iterator();
+ while (resultIt.hasNext()) {
+ Cell resultElem = (Cell) resultIt.next();
+ byte[] resultRowKey = CellUtil.cloneRow(resultElem);
+ int compare = Bytes.compareTo(resultRowKey, scanStartRowKey);
+ if ( (scan.isReversed() && compare > 0) || (!scan.isReversed() && compare < 0) || (compare == 0 && !includeStartRowKey)) {
+ resultIt.remove();
+ } else {
+ skipValidRowsSent = false;
+ break;
}
- }
- // In the loop, keep iterating through rows.
- resultsToReturn.clear();
- moreRows = nextInternal(resultsToReturn, scannerContext);
- if (ScanUtil.isDummy(resultsToReturn)) {
- return true;
- }
}
}
- if (resultsToReturn.size() > 0) {
- lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0));
+ if (resultsToReturn.isEmpty() && moreRows) {
+ // TODO should we iterate further here ?
+ return getDummyResult(resultsToReturn);
+ }
+ if (!resultsToReturn.isEmpty()) {
+ lastReturnedRowKey =
+ CellUtil.cloneRow((Cell) resultsToReturn.get(resultsToReturn.size() - 1));
}
return moreRows;
}
+
+// @Override
+// public boolean next(List resultsToReturn, ScannerContext scannerContext) throws IOException {
+// if (firstScan && actualScanStartRowKey != null) {
+// if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) {
+// if (hasRegionMoved()) {
+// LOGGER.info(
+// "Region has moved.. Actual scan start rowkey {} is not same "
+// + "as current scan start rowkey {}",
+// Bytes.toStringBinary(actualScanStartRowKey), Bytes.toStringBinary(scanStartRowKey));
+// // If region has moved in the middle of the scan operation, after resetting
+// // the scanner, hbase client uses (latest received rowkey + \x00) as new
+// // start rowkey for resuming the scan operation on the new scanner.
+// if (
+// Bytes.compareTo(ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE),
+// scanStartRowKey) == 0
+// ) {
+// scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY,
+// actualScanStartRowKey);
+// scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE,
+// Bytes.toBytes(actualScanIncludeStartRowKey));
+// } else {
+// // This happens when the server side scanner has already sent some
+// // rows back to the client and region has moved, so now we need to
+// // use skipValidRowsSent flag and also reset the scanner
+// // at paging region scanner level to re-read the previously sent
+// // values in order to re-compute the aggregation and then return
+// // only the next rowkey that was not yet sent back to the client.
+// skipValidRowsSent = true;
+// scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY,
+// actualScanStartRowKey);
+// scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE,
+// Bytes.toBytes(actualScanIncludeStartRowKey));
+// }
+// }
+// }
+// }
+// if (firstScan) {
+// firstScan = false;
+// }
+// boolean moreRows = nextInternal(resultsToReturn, scannerContext);
+// if (ScanUtil.isDummy(resultsToReturn)) {
+// return true;
+// }
+// if (skipValidRowsSent) {
+// while (true) {
+// if (!moreRows) {
+// skipValidRowsSent = false;
+// if (resultsToReturn.size() > 0) {
+// lastReturnedRowKey = CellUtil.cloneRow((Cell) resultsToReturn.get(0));
+// }
+// return moreRows;
+// }
+// Cell firstCell = (Cell) resultsToReturn.get(0);
+// byte[] resultRowKey = new byte[firstCell.getRowLength()];
+// System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), resultRowKey, 0,
+// resultRowKey.length);
+// // In case of regular scans, if the region moves and scanner is reset,
+// // hbase client checks the last returned row by the server, gets the
+// // rowkey and appends "\x00" byte, before resuming the scan. With this,
+// // scan includeStartRowKey is set to true.
+// // However, same is not the case with reverse scans. For the reverse scan,
+// // hbase client checks the last returned row by the server, gets the
+// // rowkey and treats it as startRowKey for resuming the scan. With this,
+// // scan includeStartRowKey is set to false.
+// // Hence, we need to cover both cases here.
+// if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) {
+// // This can be true for reverse scan case.
+// skipValidRowsSent = false;
+// if (includeStartRowKey) {
+// if (resultsToReturn.size() > 0) {
+// lastReturnedRowKey = CellUtil.cloneRow((Cell) resultsToReturn.get(0));
+// }
+// return moreRows;
+// }
+// // If includeStartRowKey is false and the current rowkey is matching
+// // with scanStartRowKey, return the next row result.
+// resultsToReturn.clear();
+// moreRows = nextInternal(resultsToReturn, scannerContext);
+// if (ScanUtil.isDummy(resultsToReturn)) {
+// return true;
+// }
+// if (resultsToReturn.size() > 0) {
+// lastReturnedRowKey = CellUtil.cloneRow((Cell) resultsToReturn.get(0));
+// }
+// return moreRows;
+// } else if (
+// Bytes.compareTo(ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), scanStartRowKey) == 0
+// ) {
+// // This can be true for regular scan case.
+// skipValidRowsSent = false;
+// if (includeStartRowKey) {
+// // If includeStartRowKey is true and the (current rowkey + "\0xx") is
+// // matching with scanStartRowKey, return the next row result.
+// resultsToReturn.clear();
+// moreRows = nextInternal(resultsToReturn, scannerContext);
+// if (ScanUtil.isDummy(resultsToReturn)) {
+// return true;
+// }
+// if (resultsToReturn.size() > 0) {
+// lastReturnedRowKey = CellUtil.cloneRow((Cell) resultsToReturn.get(0));
+// }
+// return moreRows;
+// }
+// }
+// // In the loop, keep iterating through rows.
+// resultsToReturn.clear();
+// moreRows = nextInternal(resultsToReturn, scannerContext);
+// if (ScanUtil.isDummy(resultsToReturn)) {
+// return true;
+// }
+// }
+// }
+// if (resultsToReturn.size() > 0) {
+// lastReturnedRowKey = CellUtil.cloneRow((Cell) resultsToReturn.get(0));
+// }
+// return moreRows;
+// }
+
/**
* Perform the next operation to grab the next row's worth of values.
* @param resultsToReturn output list of cells that are read as part of this operation.
@@ -771,17 +824,17 @@ private OrderedGroupByRegionScanner(final ObserverContext results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
boolean hasMore;
boolean atLimit;
boolean aggBoundary = false;
@@ -899,7 +952,7 @@ public boolean next(List results, ScannerContext scannerContext) throws IO
// Continue if there are more
if (!atLimit && (hasMore || aggBoundary)) {
if (!results.isEmpty()) {
- previousResultRowKey = CellUtil.cloneRow(results.get(results.size() - 1));
+ previousResultRowKey = CellUtil.cloneRow((Cell) results.get(results.size() - 1));
}
return true;
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index ab62a7ddf7b..deacb55668d 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -294,12 +294,12 @@ public boolean isFilterDone() throws IOException {
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
return next(result, true, null);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
return next(result, true, scannerContext);
}
@@ -351,12 +351,12 @@ public void close() throws IOException {
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
return next(result, false, null);
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result, false, scannerContext);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
index e1e1e486dcf..e1825263690 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
@@ -295,7 +295,7 @@ private void verifyAndOrRebuildIndex(Map> indexMutationMa
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
if (
indexRowKeyforReadRepair != null
&& singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()
@@ -450,7 +450,7 @@ public boolean next(List results) throws IOException {
return hasMore || hasMoreIncr;
}
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
index 35b61e9ac49..43bcec8a45f 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
@@ -433,7 +433,7 @@ private int populateIndexMutationFromIndexRow(List row,
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
Map> indexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
Cell lastCell = null;
int rowCount = 0;
@@ -524,7 +524,7 @@ public boolean next(List results) throws IOException {
return hasMore || hasMoreIncr;
}
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java
index 0b59c6535aa..56396f970b5 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java
@@ -358,7 +358,7 @@ private byte[] commitIfReady(byte[] uuidValue,
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
Cell lastCell = null;
int rowCount = 0;
byte[] encodedRegionName = region.getRegionInfo().getEncodedNameAsBytes();
@@ -472,7 +472,7 @@ public long getMaxResultSize() {
return scan.getMaxResultSize();
}
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index c8ee55138c5..84452467d7d 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -135,6 +135,7 @@
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilder;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -602,6 +603,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
private static final int MIN_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MIN_VALUE_KV);
private static final int MAX_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MAX_VALUE_KV);
+ // No longer public in HBase 3+
+ private static final long OLDEST_TIMESTAMP = Long.MIN_VALUE;
+
public static PName newPName(byte[] buffer) {
return buffer == null ? null : newPName(buffer, 0, buffer.length);
}
@@ -869,10 +873,7 @@ private List buildSchemas(List keys, Region region, long client
keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false, SortOrder.ASC));
}
Scan scan = new Scan();
- if (
- clientTimeStamp != HConstants.LATEST_TIMESTAMP
- && clientTimeStamp != HConstants.OLDEST_TIMESTAMP
- ) {
+ if (clientTimeStamp != HConstants.LATEST_TIMESTAMP && clientTimeStamp != OLDEST_TIMESTAMP) {
scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
} else {
scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
@@ -1405,7 +1406,9 @@ private PTable getTableFromCells(List tableCellList, List> allC
? rowKeyOrderOptimizable
: oldTable != null && oldTable.rowKeyOrderOptimizable());
- Cell updateCacheFrequencyKv = tableKeyValues[UPDATE_CACHE_FREQUENCY_INDEX];
+ // FIXME blind cast, may fail with synthetic cells
+ ExtendedCell updateCacheFrequencyKv =
+ (ExtendedCell) tableKeyValues[UPDATE_CACHE_FREQUENCY_INDEX];
long updateCacheFrequency = updateCacheFrequencyKv == null
? 0
: PLong.INSTANCE.getCodec().decodeLong(updateCacheFrequencyKv.getValueArray(),
@@ -1480,7 +1483,9 @@ private PTable getTableFromCells(List tableCellList, List> allC
: oldTable != null ? oldTable.getEncodingScheme()
: QualifierEncodingScheme.NON_ENCODED_QUALIFIERS);
- Cell useStatsForParallelizationKv = tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX];
+ // FIXME blind cast, may fail with synthetic cells
+ ExtendedCell useStatsForParallelizationKv =
+ (ExtendedCell) tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX];
Boolean useStatsForParallelization = useStatsForParallelizationKv == null
? null
: Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(),
@@ -2134,7 +2139,7 @@ private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region
scanner.next(results);
}
for (Cell kv : results) {
- KeyValue.Type type = Type.codeToType(kv.getTypeByte());
+ KeyValue.Type type = Type.codeToType(kv.getType().getCode());
if (type == Type.DeleteFamily) { // Row was deleted
Cache metaDataCache =
GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -2164,7 +2169,7 @@ private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, R
// HBase ignores the time range on a raw scan (HBASE-7362)
if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
Cell kv = results.get(0);
- if (kv.getTypeByte() == Type.Delete.getCode()) {
+ if (kv.getType().getCode() == Type.Delete.getCode()) {
Cache metaDataCache =
GlobalCache.getInstance(this.env).getMetaDataCache();
PFunction function = newDeletedFunctionMarker(kv.getTimestamp());
@@ -2193,7 +2198,7 @@ private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Regio
// HBase ignores the time range on a raw scan (HBASE-7362)
if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
Cell kv = results.get(0);
- if (kv.getTypeByte() == Type.Delete.getCode()) {
+ if (kv.getType().getCode() == Type.Delete.getCode()) {
Cache metaDataCache =
GlobalCache.getInstance(this.env).getMetaDataCache();
PSchema schema = newDeletedSchemaMarker(kv.getTimestamp());
@@ -2632,7 +2637,8 @@ public void createTable(RpcController controller, CreateTableRequest request,
) {
Table hTable =
ServerUtil.getHTableForCoprocessorScan(env, TableName.valueOf(cPhysicalName));
- ColumnFamilyDescriptor cfd = hTable.getTableDescriptor().getColumnFamilies()[0];
+ ColumnFamilyDescriptor cfd = hTable.getDescriptor().getColumnFamilies()[0];
+
UpgradeUtil.addTTLForClientOlderThan530(tableMetadata, tableKey, clientTimeStamp,
clientVersion, cfd);
}
@@ -2936,9 +2942,9 @@ private void exportSchema(List tableMetadata, byte[] tableKey, long cl
List> allColumnsCellList =
MetaDataUtil.getColumnAndLinkCellsFromMutations(tableMetadata);
// getTableFromCells assumes the Cells are sorted as they would be when reading from HBase
- Collections.sort(tableCellList, KeyValue.COMPARATOR);
+ Collections.sort(tableCellList, CellComparator.getInstance());
for (List columnCellList : allColumnsCellList) {
- Collections.sort(columnCellList, KeyValue.COMPARATOR);
+ Collections.sort(columnCellList, CellComparator.getInstance());
}
PTable newTable = getTableFromCells(tableCellList, allColumnsCellList, clientTimestamp,
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 3195b8d8f6a..32c41a413ec 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -124,10 +124,10 @@ public class MetaDataRegionObserver implements RegionObserver, RegionCoprocessor
private long statsTruncateTaskDelay;
@Override
- public void preClose(final ObserverContext c,
- boolean abortRequested) {
+ public void preClose(final ObserverContext c, boolean abortRequested) {
executor.shutdownNow();
- GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll();
+ GlobalCache.getInstance((RegionCoprocessorEnvironment) c.getEnvironment()).getMetaDataCache()
+ .invalidateAll();
}
@Override
@@ -162,8 +162,8 @@ public void start(CoprocessorEnvironment env) throws IOException {
}
@Override
- public void postOpen(ObserverContext e) {
- final RegionCoprocessorEnvironment env = e.getEnvironment();
+ public void postOpen(ObserverContext e) {
+ final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e.getEnvironment();
Runnable r = new Runnable() {
@Override
@@ -220,12 +220,16 @@ public Void run() throws Exception {
return;
}
// Ensure we only run one of the index rebuilder tasks
- if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) {
+ if (
+ ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY,
+ ((RegionCoprocessorEnvironment) e.getEnvironment()).getRegion())
+ ) {
try {
Class.forName(PhoenixDriver.class.getName());
initRebuildIndexConnectionProps(e.getEnvironment().getConfiguration());
// starts index rebuild schedule work
- BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment());
+ BuildIndexScheduleTask task =
+ new BuildIndexScheduleTask((RegionCoprocessorEnvironment) e.getEnvironment());
executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval,
TimeUnit.MILLISECONDS);
} catch (ClassNotFoundException ex) {
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java
index 7cde2a393f4..f2749f625b4 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java
@@ -21,8 +21,6 @@
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -31,6 +29,7 @@
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.filter.PagingFilter;
import org.apache.phoenix.filter.SkipScanFilter;
import org.apache.phoenix.query.KeyRange;
@@ -129,7 +128,7 @@ private boolean hasMore() {
return lookupPosition < pointLookupRanges.size();
}
- private boolean next(List results, boolean raw, RegionScanner scanner,
+ private boolean next(List results, boolean raw, RegionScanner scanner,
ScannerContext scannerContext) throws IOException {
try {
while (true) {
@@ -218,7 +217,7 @@ void init() throws IOException {
initialized = true;
}
- private boolean next(List results, boolean raw, ScannerContext scannerContext)
+ private boolean next(List results, boolean raw, ScannerContext scannerContext)
throws IOException {
init();
if (pagingFilter != null) {
@@ -240,7 +239,7 @@ private boolean next(List results, boolean raw, ScannerContext scannerCont
long mvccReadPoint = delegate.getMvccReadPoint();
delegate.close();
scan.withStartRow(adjustedStartRowKey, Bytes.toBoolean(adjustedStartRowKeyIncludeBytes));
- PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint);
+ CompatUtil.setMvccReadPoint(scan, mvccReadPoint);
if (
multiKeyPointLookup != null && !multiKeyPointLookup.verifyStartRowKey(adjustedStartRowKey)
) {
@@ -303,22 +302,22 @@ private boolean next(List results, boolean raw, ScannerContext scannerCont
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, false, null);
}
@Override
- public boolean nextRaw(List results) throws IOException {
+ public boolean nextRaw(List results) throws IOException {
return next(results, true, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
return next(results, false, scannerContext);
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, true, scannerContext);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 770566e50e7..faff5826246 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -49,7 +49,6 @@
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcUtil;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
@@ -64,6 +63,7 @@
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
@@ -571,7 +571,7 @@ private void getUserPermsFromUserDefinedAccessController(
AccessControlProtos.GetUserPermissionsRequest.Builder builderTablePerms =
AccessControlProtos.GetUserPermissionsRequest.newBuilder();
- builderTablePerms.setTableName(ProtobufUtil.toProtoTableName(tableName));
+ builderTablePerms.setTableName(CompatUtil.toProtoTableName(tableName));
builderTablePerms.setType(AccessControlProtos.Permission.Type.Table);
AccessControlProtos.GetUserPermissionsRequest requestTablePerms = builderTablePerms.build();
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 17419b87553..62ad42d4576 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -71,16 +71,16 @@ public Optional getRegionObserver() {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
preBatchMutateWithExceptions(miniBatchOp,
- c.getEnvironment().getRegion().getTableDescriptor().getTableName().getNameAsString());
+ e.getRegion().getTableDescriptor().getTableName().getNameAsString());
} catch (Throwable t) {
// Wrap all exceptions in an IOException to prevent region server crashes
- throw ClientUtil
- .createIOException("Unable to Put cells corresponding to dynamic" + "column metadata for "
- + c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(), t);
+ throw ClientUtil.createIOException("Unable to Put cells corresponding to dynamic"
+ + "column metadata for " + e.getRegion().getRegionInfo().getTable().getNameAsString(), t);
}
}
@@ -163,16 +163,15 @@ private static byte[] getQualifierForDynamicColumnMetaDataCell(PTableProtos.PCol
}
@Override
- protected RegionScanner doPostScannerOpen(final ObserverContext c,
- final Scan scan, final RegionScanner s) throws Throwable {
- NonAggregateRegionScannerFactory nonAggregateROUtil =
- new NonAggregateRegionScannerFactory(c.getEnvironment());
+ protected RegionScanner doPostScannerOpen(final ObserverContext c, final Scan scan,
+ final RegionScanner s) throws Throwable {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
+ NonAggregateRegionScannerFactory nonAggregateROUtil = new NonAggregateRegionScannerFactory(e);
if (
scan.getAttribute(BaseScannerRegionObserverConstants.READ_REPAIR_TRANSFORMING_TABLE) != null
) {
readRepairTransformingTable = true;
- globalIndexScanner =
- globalIndexChecker.new GlobalIndexScanner(c.getEnvironment(), scan, s, metricsSource);
+ globalIndexScanner = globalIndexChecker.new GlobalIndexScanner(e, scan, s, metricsSource);
return nonAggregateROUtil.getRegionScanner(scan, globalIndexScanner);
}
return nonAggregateROUtil.getRegionScanner(scan, s);
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
index 0bbe1714867..ccf9de09e54 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
@@ -104,10 +104,9 @@ private static Result getErrorResult(byte[] row, long timestamp, int errorCode)
* @since 3.0.0
*/
@Override
- public Result preIncrement(
- org.apache.hadoop.hbase.coprocessor.ObserverContext e,
+ public Result preIncrement(org.apache.hadoop.hbase.coprocessor.ObserverContext e,
Increment increment) throws IOException {
- RegionCoprocessorEnvironment env = e.getEnvironment();
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e.getEnvironment();
// We need to set this to prevent region.increment from being called
e.bypass();
Region region = env.getRegion();
@@ -342,9 +341,8 @@ private Cell createKeyValue(byte[] key, byte[] cqBytes, boolean value, long time
*/
@SuppressWarnings("deprecation")
@Override
- public Result preAppend(
- org.apache.hadoop.hbase.coprocessor.ObserverContext e,
- Append append) throws IOException {
+ public Result preAppend(org.apache.hadoop.hbase.coprocessor.ObserverContext e, Append append)
+ throws IOException {
byte[] opBuf = append.getAttribute(OPERATION_ATTRIB);
if (opBuf == null) {
return null;
@@ -383,7 +381,7 @@ public Result preAppend(
}
}
- RegionCoprocessorEnvironment env = e.getEnvironment();
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e.getEnvironment();
// We need to set this to prevent region.append from being called
e.bypass();
Region region = env.getRegion();
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java
index d547cc4738f..e85fc587e8f 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java
@@ -24,7 +24,6 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.phoenix.filter.SystemCatalogViewIndexIdFilter;
import org.apache.phoenix.util.ScanUtil;
@@ -34,8 +33,7 @@
*/
public class SystemCatalogRegionObserver implements RegionObserver, RegionCoprocessor {
@Override
- public void preScannerOpen(ObserverContext e, Scan scan)
- throws IOException {
+ public void preScannerOpen(ObserverContext c, Scan scan) throws IOException {
int clientVersion = ScanUtil.getClientVersion(scan);
/*
* ScanUtil.getClientVersion returns UNKNOWN_CLIENT_VERSION if the phoenix client version isn't
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java
index 6ed234db6d7..3ece9450c59 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java
@@ -121,13 +121,13 @@ private void setTTLContextForRow(List result) {
ttl *= 1000;
}
- private boolean isExpired(List result) throws IOException {
+ private boolean isExpired(List result) throws IOException {
long maxTimestamp = 0;
long minTimestamp = Long.MAX_VALUE;
long ts;
boolean found = false;
setTTLContextForRow(result);
- for (Cell c : result) {
+ for (Cell c : (List) result) {
ts = c.getTimestamp();
if (!found && ScanUtil.isEmptyColumn(c, emptyCF, emptyCQ)) {
if (ts < ttlWindowStart) {
@@ -180,7 +180,7 @@ private boolean isExpired(List result) throws IOException {
row.clear(); // reset the row on every iteration
Scan singleRowScan = new Scan();
singleRowScan.setTimeRange(wndStartTS, wndEndTS);
- byte[] rowKey = CellUtil.cloneRow(result.get(0));
+ byte[] rowKey = CellUtil.cloneRow((Cell) result.get(0));
singleRowScan.withStartRow(rowKey, true);
singleRowScan.withStopRow(rowKey, true);
RegionScanner scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(singleRowScan);
@@ -286,22 +286,22 @@ private boolean next(List result, boolean raw, ScannerContext scannerConte
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, false, null);
}
@Override
- public boolean nextRaw(List results) throws IOException {
+ public boolean nextRaw(List results) throws IOException {
return next(results, true, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
return next(results, false, scannerContext);
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, true, scannerContext);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
index dc6055862ec..82bf4c65fb1 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java
@@ -112,8 +112,7 @@ public String toString() {
}
@Override
- public void preClose(final ObserverContext c,
- boolean abortRequested) {
+ public void preClose(final ObserverContext c, boolean abortRequested) {
executor.shutdownNow();
}
@@ -134,10 +133,11 @@ public void start(CoprocessorEnvironment env) throws IOException {
}
@Override
- public void postOpen(ObserverContext e) {
- final RegionCoprocessorEnvironment env = e.getEnvironment();
+ public void postOpen(ObserverContext e) {
+ final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e.getEnvironment();
- SelfHealingTask task = new SelfHealingTask(e.getEnvironment(), timeMaxInterval);
+ SelfHealingTask task =
+ new SelfHealingTask((RegionCoprocessorEnvironment) e.getEnvironment(), timeMaxInterval);
executor.scheduleWithFixedDelay(task, initialDelay, timeInterval, TimeUnit.MILLISECONDS);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java
index 1b126a6591c..98ab0e2ca7e 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java
@@ -384,7 +384,7 @@ protected boolean getNextCoveredIndexRow(List result) throws IOException {
}
}
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
return next(result, null);
}
@@ -398,7 +398,7 @@ public boolean next(List result) throws IOException {
* @return boolean to indicate if there are more rows to scan
*/
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
long startTime = (scannerContext != null)
? ((PhoenixScannerContext) scannerContext).getStartTime()
: EnvironmentEdgeManager.currentTimeMillis();
@@ -431,7 +431,7 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE
if (state == State.READY) {
boolean moreRows = getNextCoveredIndexRow(result);
if (!result.isEmpty()) {
- previousResultRowKey = CellUtil.cloneRow(result.get(0));
+ previousResultRowKey = CellUtil.cloneRow((Cell) (result.get(0)));
}
return moreRows;
} else {
@@ -456,7 +456,7 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE
* @param includeInitStartRowKey scan start rowkey included.
* @param scan scan object.
*/
- private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey,
+ private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey,
boolean includeInitStartRowKey, Scan scan) {
result.clear();
if (previousResultRowKey != null) {
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
index 6b5d124ce04..7b528e8390d 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
@@ -111,13 +111,13 @@ protected boolean scanIndexTableRows(List result, ScannerContext scannerCo
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
boolean hasMore = super.next(result, scannerContext);
ServerIndexUtil.wrapResultUsingOffset(result, offset);
return hasMore;
}
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
return next(result, null);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index d85f8005405..d3f18ce8a39 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -69,7 +69,6 @@
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.coprocessor.generated.PTableProtos;
import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
@@ -325,8 +324,7 @@ void checkForRegionClosingOrSplitting() throws IOException {
}
@Override
- public void preScannerOpen(ObserverContext e, Scan scan)
- throws IOException {
+ public void preScannerOpen(ObserverContext e, Scan scan) throws IOException {
super.preScannerOpen(e, scan);
if (ScanUtil.isAnalyzeTable(scan)) {
scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW,
@@ -613,11 +611,13 @@ private boolean areMutationsInSameTable(Table targetHTable, Region region) {
}
@Override
- public InternalScanner preCompact(ObserverContext c, Store store,
- InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
+ public InternalScanner preCompact(ObserverContext c, Store store, InternalScanner scanner,
+ ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request)
+ throws IOException {
+
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
- final TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable();
+ final TableName tableName = e.getRegion().getRegionInfo().getTable();
// Compaction and split upcalls run with the effective user context of the requesting user.
// This will lead to failure of cross cluster RPC if the effective user is not
// the login user. Switch to the login user context to ensure we have the expected
@@ -655,7 +655,7 @@ public InternalScanner run() throws Exception {
// indexing design.
if (
table != null && !PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName)
- && !ServerUtil.hasCoprocessor(c.getEnvironment(), GlobalIndexChecker.class.getName())
+ && !ServerUtil.hasCoprocessor(e, GlobalIndexChecker.class.getName())
) {
List indexes = PTableType.INDEX.equals(table.getType())
? Lists.newArrayList(table)
@@ -671,7 +671,7 @@ public InternalScanner run() throws Exception {
}
}
if (table != null && isPhoenixCompactionEnabled(c.getEnvironment().getConfiguration())) {
- internalScanner = new CompactionScanner(c.getEnvironment(), store, scanner,
+ internalScanner = new CompactionScanner(e, store, scanner,
BaseScannerRegionObserverConstants
.getMaxLookbackInMillis(c.getEnvironment().getConfiguration()),
request.isMajor() || request.isAllFiles(), keepDeleted, table);
@@ -683,8 +683,7 @@ public InternalScanner run() throws Exception {
try {
long clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
DelegateRegionCoprocessorEnvironment compactionConfEnv =
- new DelegateRegionCoprocessorEnvironment(c.getEnvironment(),
- ConnectionType.COMPACTION_CONNECTION);
+ new DelegateRegionCoprocessorEnvironment(e, ConnectionType.COMPACTION_CONNECTION);
StatisticsCollector statisticsCollector = StatisticsCollectorFactory
.createStatisticsCollector(compactionConfEnv, tableName.getNameAsString(),
clientTimeStamp, store.getColumnFamilyDescriptor().getName());
@@ -817,17 +816,17 @@ public void close() throws IOException {
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
return next(results);
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
results.add(aggKeyValue);
return false;
}
@@ -1015,8 +1014,7 @@ private void waitForScansToFinish(ObserverContext
}
@Override
- public void preBulkLoadHFile(ObserverContext c,
- List> familyPaths) throws IOException {
+ public void preBulkLoadHFile(ObserverContext c, List familyPaths) throws IOException {
// Don't allow bulkload if operations need read and write to same region are going on in the
// the coprocessors to avoid dead lock scenario. See PHOENIX-3111.
synchronized (lock) {
@@ -1028,8 +1026,7 @@ public void preBulkLoadHFile(ObserverContext c,
}
@Override
- public void preClose(ObserverContext c, boolean abortRequested)
- throws IOException {
+ public void preClose(ObserverContext c, boolean abortRequested) throws IOException {
waitForScansToFinish(c);
}
@@ -1039,8 +1036,8 @@ protected boolean isRegionObserverFor(Scan scan) {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
final Configuration conf = c.getEnvironment().getConfiguration();
try {
final HAGroupStoreManager haGroupStoreManager = HAGroupStoreManager.getInstance(conf);
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java
index 1a4794e86e6..b8090278803 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java
@@ -590,18 +590,17 @@ void insertEmptyKeyValue(List results,
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
@Override
- public boolean next(List resultsToReturn) throws IOException {
+ public boolean next(List resultsToReturn) throws IOException {
return next(resultsToReturn, null);
}
@Override
- public boolean next(List resultsToReturn, ScannerContext scannerContext)
- throws IOException {
+ public boolean next(List resultsToReturn, ScannerContext scannerContext) throws IOException {
boolean hasMore;
Configuration conf = env.getConfiguration();
final TenantCache tenantCache = GlobalCache.getTenantCache(env, ScanUtil.getTenantId(scan));
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index fc55b2435dc..22770c27229 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -517,9 +517,10 @@ public void stop(CoprocessorEnvironment e) throws IOException {
* clauses for this row.
*/
@Override
- public Result preIncrementAfterRowLock(final ObserverContext e,
- final Increment inc) throws IOException {
+ public Result preIncrementAfterRowLock(final ObserverContext c, final Increment inc)
+ throws IOException {
long start = EnvironmentEdgeManager.currentTimeMillis();
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
List mutations = this.builder.executeAtomicOp(inc);
if (mutations == null) {
@@ -528,18 +529,18 @@ public Result preIncrementAfterRowLock(final ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
if (this.disabled) {
return;
}
@@ -1693,8 +1694,7 @@ private void removeBatchMutateContext(ObserverContext c, WALKey key,
- WALEdit edit) {
+ public void preWALAppend(ObserverContext c, WALKey key, WALEdit edit) {
if (shouldWALAppend) {
BatchMutateContext context = getBatchMutateContext(c);
appendMutationAttributesToWALKey(key, context);
@@ -1725,8 +1725,8 @@ public void appendMutationAttributesToWALKey(WALKey key,
* batch fails.
*/
@Override
- public void postBatchMutateIndispensably(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
+ public void postBatchMutateIndispensably(ObserverContext c,
+ MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
if (this.disabled) {
return;
}
@@ -1746,9 +1746,9 @@ public void postBatchMutateIndispensably(ObserverContext cells = new ArrayList<>();
cells.add(cell);
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index e33188c3fb7..43148cd9385 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -293,9 +293,10 @@ public void stop(CoprocessorEnvironment e) throws IOException {
* clauses for this row.
*/
@Override
- public Result preIncrementAfterRowLock(final ObserverContext e,
- final Increment inc) throws IOException {
+ public Result preIncrementAfterRowLock(final ObserverContext c, final Increment inc)
+ throws IOException {
long start = EnvironmentEdgeManager.currentTimeMillis();
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
List mutations = this.builder.executeAtomicOp(inc);
if (mutations == null) {
@@ -304,18 +305,18 @@ public Result preIncrementAfterRowLock(final ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
if (this.disabled) {
return;
}
@@ -552,8 +553,8 @@ private void removeBatchMutateContext(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
+ public void postBatchMutateIndispensably(ObserverContext c,
+ MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
if (this.disabled) {
return;
}
@@ -648,9 +649,9 @@ private Collection> extractIndexUpdate(WALEdit edit) {
}
@Override
- public void postOpen(final ObserverContext c) {
- Multimap updates =
- failedIndexEdits.getEdits(c.getEnvironment().getRegion());
+ public void postOpen(final ObserverContext c) {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) c.getEnvironment();
+ Multimap updates = failedIndexEdits.getEdits(e.getRegion());
if (this.disabled) {
return;
@@ -670,9 +671,9 @@ public void postOpen(final ObserverContext c) {
// writes succeed again
try {
writer.writeAndHandleFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
- } catch (IOException e) {
+ } catch (IOException ex) {
LOGGER.error("During WAL replay of outstanding index updates, "
- + "Exception is thrown instead of killing server during index writing", e);
+ + "Exception is thrown instead of killing server during index writing", ex);
}
} finally {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index 82434bc230f..e55a7d71162 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
@@ -32,6 +33,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
public class IndexedKeyValue extends KeyValue {
@@ -82,7 +84,8 @@ public IndexedKeyValue() {
}
private IndexedKeyValue(Cell c, byte[] bs, Mutation mutation) {
- super(c);
+ // FIXME blind cast, may fail with synthetic cells
+ super((ExtendedCell) c);
this.indexTableName = new ImmutableBytesPtr(bs);
this.mutation = mutation;
this.hashCode = calcHashCode(indexTableName, mutation);
@@ -155,9 +158,9 @@ public void markBatchFinished() {
protected MutationProto toMutationProto(Mutation mutation) throws IOException {
MutationProto m = null;
if (mutation instanceof Put) {
- m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.PUT, mutation);
+ m = CompatUtil.toMutation(MutationType.PUT, mutation);
} else if (mutation instanceof Delete) {
- m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.DELETE, mutation);
+ m = CompatUtil.toMutation(MutationType.DELETE, mutation);
} else {
throw new IOException("Put/Delete mutations only supported");
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
index 94c7cb2a157..f176ba33477 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
/**
@@ -73,7 +74,7 @@ public static KeyValue readKeyValue(DataInput in) throws IOException {
ImmutableBytesPtr indexTableName = new ImmutableBytesPtr(Bytes.readByteArray(in));
byte[] mutationData = Bytes.readByteArray(in);
ClientProtos.MutationProto mProto = ClientProtos.MutationProto.parseFrom(mutationData);
- Mutation mutation = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
+ Mutation mutation = CompatUtil.toMutation(mProto);
IndexedKeyValue kv = null;
if (mutation != null) {
kv = IndexedKeyValue.newIndexedKeyValue(indexTableName.copyBytesIfNecessary(), mutation);
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index 799eef719aa..641560c050f 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -132,11 +133,7 @@ public void stop(String why) {
LOGGER.error("Closing the connection failed: ", e);
}
}
- try {
- admin.close();
- } catch (IOException e) {
- LOGGER.error("Closing the admin failed: ", e);
- }
+ CompatUtil.closeAdminAndLog(admin, LOGGER);
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
index 97f23c498f6..0fd06775025 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -56,6 +55,7 @@
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.coprocessor.BaseRegionScanner;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.DelegateRegionScanner;
@@ -250,7 +250,7 @@ private boolean shouldCreateUnverifiedRowFilter(Filter delegateFilter) {
return true;
}
- public boolean next(List result, boolean raw, ScannerContext scannerContext)
+ public boolean next(List result, boolean raw, ScannerContext scannerContext)
throws IOException {
try {
if (!initialized) {
@@ -273,7 +273,7 @@ public boolean next(List result, boolean raw, ScannerContext scannerContex
if (isDummy(result)) {
return true;
}
- Cell cell = result.get(0);
+ Cell cell = (Cell) (result.get(0));
if (verifyRowAndRepairIfNecessary(result)) {
break;
}
@@ -301,22 +301,22 @@ public boolean next(List result, boolean raw, ScannerContext scannerContex
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
return next(result, false, null);
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
return next(result, true, null);
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result, false, scannerContext);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
return next(result, true, scannerContext);
}
@@ -357,8 +357,8 @@ private void repairIndexRows(byte[] indexRowKey, long ts, List row) throws
// mvcc value of -1 will ensure that new scanners opened on index table using
// indexScan and singleRowIndexScan are able to read the latest snapshot of the
// index updates.
- PackagePrivateFieldAccessor.setMvccReadPoint(indexScan, -1);
- PackagePrivateFieldAccessor.setMvccReadPoint(singleRowIndexScan, -1);
+ CompatUtil.setMvccReadPoint(indexScan, -1);
+ CompatUtil.setMvccReadPoint(singleRowIndexScan, -1);
byte[] dataTableName =
scan.getAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME);
dataHTable = ServerUtil.ConnectionFactory
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index c45b93eb115..3d0fa3f6115 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -136,16 +136,18 @@ public void remove() {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
- Mutation m = miniBatchOp.getOperation(0);
+ Mutation m = (Mutation) miniBatchOp.getOperation(0);
if (!codec.isEnabled(m)) {
return;
}
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
+
PhoenixIndexMetaData indexMetaData =
- new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp);
+ new PhoenixIndexMetaDataBuilder(env).getIndexMetaData(miniBatchOp);
if (
indexMetaData.getClientVersion() >= MetaDataProtocol.MIN_TX_CLIENT_SIDE_MAINTENANCE
&& !indexMetaData.hasLocalIndexes()
@@ -163,7 +165,6 @@ public void preBatchMutate(ObserverContext c,
current = NullSpan.INSTANCE;
}
- RegionCoprocessorEnvironment env = c.getEnvironment();
PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext();
if (txnContext == null) {
throw new NullPointerException("Expected to find transaction in metadata for "
@@ -176,7 +177,7 @@ public void preBatchMutate(ObserverContext c,
// get the index updates for all elements in this batch
indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp));
}
- byte[] tableName = c.getEnvironment().getRegionInfo().getTable().getName();
+ byte[] tableName = env.getRegionInfo().getTable().getName();
Iterator> indexUpdatesItr = indexUpdates.iterator();
List localUpdates = new ArrayList(indexUpdates.size());
while (indexUpdatesItr.hasNext()) {
@@ -206,8 +207,8 @@ public void preBatchMutate(ObserverContext c,
}
@Override
- public void postBatchMutateIndispensably(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
+ public void postBatchMutateIndispensably(ObserverContext c,
+ MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException {
BatchMutateContext context = getBatchMutateContext(c);
if (context == null || context.indexUpdates == null) {
return;
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
index 65affd6e792..e2cca1bc9c4 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
@@ -455,12 +455,12 @@ public boolean isFilterDone() {
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
try {
if (isFilterDone()) {
return false;
@@ -510,7 +510,7 @@ public boolean next(List results, ScannerContext scannerContext) throws IO
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
@@ -594,12 +594,12 @@ public boolean isFilterDone() {
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
try {
if (isFilterDone()) {
return false;
@@ -632,7 +632,7 @@ public boolean next(List results, ScannerContext scannerContext) throws IO
}
@Override
- public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException {
return next(results, scannerContext);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
index bda112e7092..e18405bd2cb 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
@@ -195,12 +195,12 @@ public byte[] getActualStartKey() {
}
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
return next(results, null);
}
@Override
- public boolean next(List results, ScannerContext scannerContext) throws IOException {
+ public boolean next(List results, ScannerContext scannerContext) throws IOException {
try {
boolean next =
(scannerContext == null) ? s.next(results) : s.next(results, scannerContext);
@@ -240,12 +240,12 @@ public long getMvccReadPoint() {
}
@Override
- public boolean nextRaw(List result) throws IOException {
+ public boolean nextRaw(List result) throws IOException {
return nextRaw(result, null);
}
@Override
- public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
+ public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException {
try {
boolean next =
(scannerContext == null) ? s.nextRaw(result) : s.nextRaw(result, scannerContext);
@@ -285,7 +285,7 @@ public boolean nextRaw(List result, ScannerContext scannerContext) throws
) {
int resultPosition = replaceServerParsedExpressionElement(serverParsedKVRefs,
serverParsedFuncRefs, result);
- serverParsedResultCell = result.get(resultPosition);
+ serverParsedResultCell = (Cell) result.get(resultPosition);
}
if (projector != null) {
Tuple toProject = useQualifierAsListIndex
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 90bcdb8abe7..52f4b4d742f 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -49,6 +49,7 @@
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.phoenix.compat.hbase.CompatRegionCoprocessorEnvironment;
import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -196,7 +197,7 @@ public boolean renewLease() {
}
private RegionCoprocessorEnvironment getSnapshotContextEnvironment(final Configuration conf) {
- return new RegionCoprocessorEnvironment() {
+ return new CompatRegionCoprocessorEnvironment() {
@Override
public Region getRegion() {
return region;
@@ -276,6 +277,7 @@ public Connection createConnection(Configuration conf) throws IOException {
public ExtendedCellBuilder getCellBuilder() {
throw new UnsupportedOperationException();
}
+
};
}
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 9847729b119..79379a294b8 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -145,7 +146,7 @@ static RecordWriter createRecordWriter(
@Override
public void write(TableRowkeyPair row, V cell) throws IOException {
- Cell kv = cell;
+ ExtendedCell kv = (ExtendedCell) cell;
// null input == user explicitly wants to flush
if (row == null && kv == null) {
rollWriters();
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
index c71e6ca5d1c..0ec5e038080 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
@@ -22,11 +22,11 @@
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
@@ -87,7 +87,7 @@ public void readFields(DataInput input) throws IOException {
byte[] protoScanBytes = new byte[WritableUtils.readVInt(input)];
input.readFully(protoScanBytes);
ClientProtos.Scan protoScan = ClientProtos.Scan.parseFrom(protoScanBytes);
- Scan scan = ProtobufUtil.toScan(protoScan);
+ Scan scan = CompatUtil.toScan(protoScan);
scans.add(scan);
}
init();
@@ -101,7 +101,7 @@ public void write(DataOutput output) throws IOException {
Preconditions.checkNotNull(scans);
WritableUtils.writeVInt(output, scans.size());
for (Scan scan : scans) {
- ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
+ ClientProtos.Scan protoScan = CompatUtil.toScan(scan);
byte[] protoScanBytes = protoScan.toByteArray();
WritableUtils.writeVInt(output, protoScanBytes.length);
output.write(protoScanBytes);
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index b328b9ef035..aa1cfd1d0e0 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -27,7 +27,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Result;
@@ -343,7 +342,7 @@ public void collectStatistics(final List results) {
incrementRow = false;
}
}
- int kvLength = KeyValueUtil.getSerializedSize(cell, true);
+ int kvLength = cell.getSerializedSize();
long byteCount = gps.getFirst() + kvLength;
gps.setFirst(byteCount);
if (byteCount >= guidePostDepth) {
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 3e4f813cac0..6479414a3c4 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -65,14 +65,14 @@ public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats,
}
@Override
- public boolean next(List result) throws IOException {
+ public boolean next(List result) throws IOException {
boolean ret = delegate.next(result);
updateStats(result);
return ret;
}
@Override
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 7c3da2a3121..42af9168237 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -45,13 +45,13 @@
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -251,7 +251,7 @@ public Void run() throws Exception {
byte[] row = mutations.get(0).getRow();
MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
for (Mutation m : mutations) {
- mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
+ mrmBuilder.addMutationRequest(CompatUtil.toMutation(getMutationType(m), m));
}
MutateRowsRequest mrm = mrmBuilder.build();
CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
@@ -260,7 +260,7 @@ public Void run() throws Exception {
try {
service.mutateRows(null, mrm);
} catch (ServiceException ex) {
- ProtobufUtil.toIOException(ex);
+ toIOException(ex);
}
}
return null;
@@ -309,4 +309,20 @@ public void deleteStatsForRegion(Region region, StatisticsCollector tracker,
}
}
+ /**
+ * Unwraps an exception from a protobuf service into the underlying (expected) IOException. This
+ * method will always throw an exception.
+ * @param se the {@code ServiceException} instance to convert into an {@code IOException}
+ */
+ private static void toIOException(ServiceException se) throws IOException {
+ if (se == null) {
+ throw new NullPointerException("Null service exception passed!");
+ }
+
+ Throwable cause = se.getCause();
+ if (cause != null && cause instanceof IOException) {
+ throw (IOException) cause;
+ }
+ throw new IOException(se);
+ }
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java
index 2bfcebd78b4..3dad182eabe 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java
@@ -25,9 +25,9 @@
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PhoenixTagType;
import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RawCell;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -53,7 +53,7 @@ public static void wrapResultUsingOffset(List result, final int offset) th
while (itr.hasNext()) {
final Cell cell = itr.next();
// TODO: Create DelegateCell class instead
- Cell newCell = new OffsetCell(cell, offset);
+ Cell newCell = new OffsetCell((ExtendedCell) cell, offset);
itr.set(newCell);
}
}
@@ -78,10 +78,9 @@ public static void setDeleteAttributes(MiniBatchOperationInProgress mi
Tag sourceOpTag = new ArrayBackedTag(PhoenixTagType.SOURCE_OPERATION_TAG_TYPE, sourceOpAttr);
List| updatedCells = new ArrayList<>();
for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
- Cell cell = cellScanner.current();
- RawCell rawCell = (RawCell) cell;
+ ExtendedCell cell = (ExtendedCell) cellScanner.current();
List tags = new ArrayList<>();
- Iterator tagsIterator = rawCell.getTags();
+ Iterator tagsIterator = cell.getTags();
while (tagsIterator.hasNext()) {
tags.add(tagsIterator.next());
}
diff --git a/phoenix-core/.attach_pid119139 b/phoenix-core/.attach_pid119139
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/phoenix-core/.attach_pid44314 b/phoenix-core/.attach_pid44314
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/phoenix-core/.attach_pid62857 b/phoenix-core/.attach_pid62857
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/phoenix-core/.attach_pid71767 b/phoenix-core/.attach_pid71767
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 0570a64a8d4..73006c15f1d 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -91,11 +91,6 @@
hbase-metrics-api
test
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-
- org.apache.hbase
- hbase-protocol
- test
-
org.apache.hbase
hbase-protocol-shaded
@@ -117,11 +112,11 @@
test-jar
test
-
- org.apache.hbase
- hbase-hadoop2-compat
- test
-
+
+
+
+
+
org.apache.hbase
hbase-mapreduce
@@ -483,10 +478,6 @@
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
org.apache.maven.plugins
maven-failsafe-plugin
@@ -503,5 +494,231 @@
+ ${actualSourceDirectory}
+ ${actualTestSourceDirectory}
+
+
+ hbase-2.x
+
+
+ hbase.profile
+
+ !3.0
+
+
+
+ src/main/java
+ src/test/java
+
+
+
+ org.apache.hbase
+ hbase-hadoop2-compat
+ test
+
+
+ org.apache.hbase
+ hbase-protocol
+ test
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-testresource
+
+ add-test-resource
+
+ generate-sources
+
+
+
+ ${basedir}/src/test/resources
+
+
+
+
+
+ add-it-source
+
+ add-test-source
+
+ generate-sources
+
+
+ ${basedir}/src/it/java
+
+
+
+
+ add-it-resource
+
+ add-test-resource
+
+ generate-sources
+
+
+
+ ${basedir}/src/it/resources
+
+
+
+
+
+
+
+
+
+
+ hbase-3.x
+
+
+ hbase.profile
+ 3.0
+
+
+
+ 4.31.1
+ target/generated-sources/replaced/main/java
+ target/generated-sources/replaced/test/java
+
+
+
+
+ com.google.code.maven-replacer-plugin
+ replacer
+
+
+ replace-generated-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/target/generated-sources/protobuf
+
+ **/*.java
+
+ true
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ replace-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+ **/*.java
+
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ ([^\.])org.apache.hadoop.hbase.protobuf.generated
+ $1org.apache.hadoop.hbase.shaded.protobuf.generated
+
+
+
+
+
+ copy-sources
+
+ replace
+
+ process-sources
+
+ ${basedir}/src
+ ../target/generated-sources/replaced
+
+
+ **/OmidTransactionContext*.java
+
+
+
+
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-test-resource
+
+ add-test-resource
+
+ generate-sources
+
+
+
+ ${basedir}/src/test/resources
+
+
+
+
+
+ add-it-source
+
+ add-test-source
+
+ generate-sources
+
+
+ ${basedir}/target/generated-sources/replaced/it/java
+
+
+
+
+ add-it-resource
+
+ add-test-resource
+
+ generate-sources
+
+
+
+ ${basedir}/src/it/resources
+
+
+
+
+
+
+
+
+
+
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 56a9d79f31a..c3653587e2a 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -296,7 +296,7 @@ private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IO
@SuppressWarnings("deprecation")
private int getKeyValueCount(Table table) throws IOException {
Scan scan = new Scan();
- scan.setMaxVersions(Integer.MAX_VALUE - 1);
+ scan.readVersions(Integer.MAX_VALUE - 1);
ResultScanner results = table.getScanner(scan);
int count = 0;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index e0d3bc1b35c..b5f63f91553 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -48,15 +48,13 @@
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -67,6 +65,7 @@
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.coprocessorclient.MetaDataProtocol;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -93,7 +92,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-public abstract class BasePermissionsIT extends BaseTest {
+public abstract class BasePermissionsIT {
private static final Logger LOGGER = LoggerFactory.getLogger(BasePermissionsIT.class);
@@ -101,7 +100,7 @@ public abstract class BasePermissionsIT extends BaseTest {
private static final String SUPER_USER = System.getProperty("user.name");
- static HBaseTestingUtility testUtil;
+ static IntegrationTestingUtility testUtil;
private static final Set PHOENIX_SYSTEM_TABLES =
new HashSet<>(Arrays.asList("SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS",
"SYSTEM.FUNCTION", "SYSTEM.MUTEX", "SYSTEM.CHILD_LINK", "SYSTEM.TRANSFORM",
@@ -165,7 +164,7 @@ public abstract class BasePermissionsIT extends BaseTest {
BasePermissionsIT(final boolean isNamespaceMapped) throws Exception {
this.isNamespaceMapped = isNamespaceMapped;
- this.tableName = generateUniqueName();
+ this.tableName = BaseTest.generateUniqueName();
}
static void initCluster(boolean isNamespaceMapped) throws Exception {
@@ -179,7 +178,7 @@ static void initCluster(boolean isNamespaceMapped, boolean useCustomAccessContro
testUtil = null;
}
- testUtil = new HBaseTestingUtility();
+ testUtil = new IntegrationTestingUtility();
Configuration config = testUtil.getConfiguration();
enablePhoenixHBaseAuthorization(config, useCustomAccessController);
@@ -198,23 +197,23 @@ static void initCluster(boolean isNamespaceMapped, boolean useCustomAccessContro
public void initUsersAndTables() {
Configuration configuration = testUtil.getConfiguration();
- regularUser1 = User.createUserForTesting(configuration, "regularUser1_" + generateUniqueName(),
- new String[0]);
- regularUser2 = User.createUserForTesting(configuration, "regularUser2_" + generateUniqueName(),
- new String[0]);
- regularUser3 = User.createUserForTesting(configuration, "regularUser3_" + generateUniqueName(),
- new String[0]);
- regularUser4 = User.createUserForTesting(configuration, "regularUser4_" + generateUniqueName(),
- new String[0]);
+ regularUser1 = User.createUserForTesting(configuration,
+ "regularUser1_" + BaseTest.generateUniqueName(), new String[0]);
+ regularUser2 = User.createUserForTesting(configuration,
+ "regularUser2_" + BaseTest.generateUniqueName(), new String[0]);
+ regularUser3 = User.createUserForTesting(configuration,
+ "regularUser3_" + BaseTest.generateUniqueName(), new String[0]);
+ regularUser4 = User.createUserForTesting(configuration,
+ "regularUser4_" + BaseTest.generateUniqueName(), new String[0]);
groupUser = User.createUserForTesting(testUtil.getConfiguration(),
- "groupUser_" + generateUniqueName(), new String[] { GROUP_SYSTEM_ACCESS });
+ "groupUser_" + BaseTest.generateUniqueName(), new String[] { GROUP_SYSTEM_ACCESS });
unprivilegedUser = User.createUserForTesting(configuration,
- "unprivilegedUser_" + generateUniqueName(), new String[0]);
+ "unprivilegedUser_" + BaseTest.generateUniqueName(), new String[0]);
- schemaName = generateUniqueName();
- tableName = generateUniqueName();
+ schemaName = BaseTest.generateUniqueName();
+ tableName = BaseTest.generateUniqueName();
fullTableName = schemaName + "." + tableName;
idx1TableName = tableName + "_IDX1";
idx2TableName = tableName + "_IDX2";
@@ -258,7 +257,7 @@ private static void configureStatsConfigurations(Configuration conf) {
conf.set(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
}
- public static HBaseTestingUtility getUtility() {
+ public static IntegrationTestingUtility getUtility() {
return testUtil;
}
@@ -363,7 +362,7 @@ public Connection getConnection(String tenantId) throws SQLException {
}
protected static String getUrl() {
- return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
+ return "jdbc:phoenix+zk:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
}
private static Set getHBaseTables() throws IOException {
@@ -1662,8 +1661,8 @@ public Void run() throws Exception {
@Test
public void testUpsertIntoImmutableTable() throws Throwable {
- final String schema = generateUniqueName();
- final String tableName = generateUniqueName();
+ final String schema = BaseTest.generateUniqueName();
+ final String tableName = BaseTest.generateUniqueName();
final String phoenixTableName = schema + "." + tableName;
grantSystemTableAccess();
superUser1.runAs(new PrivilegedExceptionAction() {
@@ -1765,7 +1764,7 @@ public void getUserPermissions(RpcController controller,
final List perms = new ArrayList<>();
if (request.getType() == AccessControlProtos.Permission.Type.Table) {
final TableName table =
- request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
+ request.hasTableName() ? CompatUtil.toTableName(request.getTableName()) : null;
perms.addAll(AccessControlClient.getUserPermissions(connection, table.getNameAsString()));
} else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
final String namespace =
@@ -1790,9 +1789,9 @@ public void getUserPermissions(RpcController controller,
// Copied from org.apache.hadoop.hbase.security.access.SecureTestUtil because it's not visible
// there
- private static List getAccessControllers(MiniHBaseCluster cluster) {
+ private static List getAccessControllers() {
List result = Lists.newArrayList();
- for (RegionServerThread t : cluster.getLiveRegionServerThreads()) {
+ for (RegionServerThread t : testUtil.getHBaseCluster().getLiveRegionServerThreads()) {
for (HRegion region : t.getRegionServer().getOnlineRegionsLocalContext()) {
Coprocessor cp =
region.getCoprocessorHost().findCoprocessor(AccessController.class.getName());
@@ -1806,9 +1805,9 @@ private static List getAccessControllers(MiniHBaseCluster clus
// Copied from org.apache.hadoop.hbase.security.access.SecureTestUtil because it's not visible
// there
- private static Map getAuthManagerMTimes(MiniHBaseCluster cluster) {
+ private static Map getAuthManagerMTimes() {
Map result = Maps.newHashMap();
- for (AccessController ac : getAccessControllers(cluster)) {
+ for (AccessController ac : getAccessControllers()) {
result.put(ac, ac.getAuthManager().getMTime());
}
return result;
@@ -1817,9 +1816,9 @@ private static Map getAuthManagerMTimes(MiniHBaseCluster
// Copied from org.apache.hadoop.hbase.security.access.SecureTestUtil because it's not visible
// there
@SuppressWarnings("rawtypes")
- public static void updateACLs(final HBaseTestingUtility util, Callable c) throws Exception {
+ public static void updateACLs(final IntegrationTestingUtility util, Callable c) throws Exception {
// Get the current mtimes for all access controllers
- final Map oldMTimes = getAuthManagerMTimes(util.getHBaseCluster());
+ final Map oldMTimes = getAuthManagerMTimes();
// Run the update action
c.call();
@@ -1828,7 +1827,7 @@ public static void updateACLs(final HBaseTestingUtility util, Callable c) throws
util.waitFor(WAIT_TIME, 100, new Predicate() {
@Override
public boolean evaluate() {
- Map mtimes = getAuthManagerMTimes(util.getHBaseCluster());
+ Map mtimes = getAuthManagerMTimes();
for (Map.Entry e : mtimes.entrySet()) {
if (!oldMTimes.containsKey(e.getKey())) {
LOGGER.error("Snapshot of AccessController state does not include instance on region "
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CDCStreamIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CDCStreamIT.java
index cb805bfcccc..7c260c2c191 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CDCStreamIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CDCStreamIT.java
@@ -923,7 +923,7 @@ private void assertPartitionMetadata(Connection conn, String tableName, String c
List tableRegions = conn.unwrap(PhoenixConnection.class).getQueryServices()
.getAllTableRegions(tableName.getBytes());
for (HRegionLocation tableRegion : tableRegions) {
- RegionInfo ri = tableRegion.getRegionInfo();
+ RegionInfo ri = tableRegion.getRegion();
PreparedStatement ps = conn.prepareStatement("SELECT * FROM " + SYSTEM_CDC_STREAM_NAME
+ " WHERE TABLE_NAME = ? AND STREAM_NAME = ? AND PARTITION_ID= ?");
ps.setString(1, tableName);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
index 03ca9e1a684..42150009244 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -419,10 +419,11 @@ public static class DelayingRegionObserver extends SimpleRegionObserver {
private volatile boolean lockedTableRow;
@Override
- public void postBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void postBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
- String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString();
+ String tableName = env.getRegionInfo().getTable().getNameAsString();
if (tableName.startsWith(MVCC_LOCK_TEST_TABLE_PREFIX)) {
Thread.sleep(ROW_LOCK_WAIT_TIME / 2); // Wait long enough that they'll both have the same
// mvcc
@@ -432,15 +433,16 @@ public void postBatchMutate(ObserverContext c,
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws HBaseIOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws HBaseIOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
- String tableName = c.getEnvironment().getRegionInfo().getTable().getNameAsString();
+ String tableName = env.getRegionInfo().getTable().getNameAsString();
if (tableName.startsWith(LOCK_TEST_TABLE_PREFIX)) {
if (lockedTableRow) {
throw new DoNotRetryIOException(
"Expected lock in preBatchMutate to be exclusive, but it wasn't for row "
- + Bytes.toStringBinary(miniBatchOp.getOperation(0).getRow()));
+ + Bytes.toStringBinary(((Mutation) miniBatchOp.getOperation(0)).getRow()));
}
lockedTableRow = true;
Thread.sleep(ROW_LOCK_WAIT_TIME + 2000);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index f4f57bafc8a..37a4b934c83 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -1670,7 +1670,7 @@ public void testCreateTableWithNoVerify() throws SQLException, IOException, Inte
try (Admin admin = driver
.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin()) {
admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(tableBytes))
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName)
.setKeepDeletedCells(KeepDeletedCells.TRUE).build())
.build(), splits);
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index c2a31842f9b..f7ca6ee7f41 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -41,6 +41,7 @@
import java.util.Properties;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.PhoenixTagType;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RawCell;
@@ -1107,7 +1108,7 @@ private void checkTagPresentInDeleteMarker(String tableName, String startRowKey,
}
}
assertFalse("Values shouldn't be empty", values.isEmpty());
- Cell first = values.get(0);
+ ExtendedCell first = (ExtendedCell) values.get(0);
assertTrue("First cell should be delete marker ", CellUtil.isDelete(first));
List tags = PrivateCellUtil.getTags(first);
if (tagPresent) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
index d84c3f6bb61..f8308c232f8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.ExplainPlan;
@@ -408,8 +407,7 @@ public static class DeleyOpenRegionObserver implements RegionObserver {
private CountDownLatch latch = new CountDownLatch(1);
@Override
- public void preClose(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
+ public void preClose(org.apache.hadoop.hbase.coprocessor.ObserverContext c,
boolean abortRequested) throws IOException {
if (DELAY_OPEN) {
try {
@@ -421,9 +419,8 @@ public void preClose(
}
@Override
- public void preScannerOpen(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c,
- Scan scan) throws IOException {
+ public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan)
+ throws IOException {
if (DELAY_OPEN && retryCount == 1) {
latch.countDown();
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
index 6d3a47d961e..847b1ef38eb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
@@ -85,7 +85,6 @@
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1648,8 +1647,8 @@ private List verifyRunStatusFromResultTable(Connection conn, Long scn, S
public static class FastFailRegionObserver implements RegionObserver, RegionCoprocessor {
@Override
- public RegionScanner postScannerOpen(final ObserverContext c,
- final Scan scan, final RegionScanner s) throws IOException {
+ public RegionScanner postScannerOpen(final ObserverContext c, final Scan scan,
+ final RegionScanner s) throws IOException {
throw new DoNotRetryIOException("I'm just a coproc that's designed to fail fast");
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index 3d03378dbba..a2c2076048e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -63,14 +63,12 @@
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -415,8 +413,8 @@ public static int getMutationCount() {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws HBaseIOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws HBaseIOException {
mutationCount.addAndGet(miniBatchOp.size());
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetadataServerConnectionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetadataServerConnectionsIT.java
index ecebf38016f..2900a689c07 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetadataServerConnectionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetadataServerConnectionsIT.java
@@ -40,8 +40,6 @@
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ConnectionImplementation;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
@@ -129,43 +127,39 @@ public void createTable(RpcController controller, MetaDataProtos.CreateTableRequ
ThreadPoolExecutor htpe = null;
// Get the thread pool executor from the connection.
- if (conn instanceof ConnectionImplementation) {
- ConnectionImplementation connImpl = ((ConnectionImplementation) conn);
- Field props = null;
- props = ConnectionImplementation.class.getDeclaredField("batchPool");
- props.setAccessible(true);
- ctpe = (ThreadPoolExecutor) props.get(connImpl);
- LOGGER.debug("ConnectionImplementation Thread pool info :" + ctpe.toString());
- }
+ Field props = null;
+ props = conn.getClass().getDeclaredField("batchPool");
+ props.setAccessible(true);
+ ctpe = (ThreadPoolExecutor) props.get(conn);
+ LOGGER.debug("ConnectionImplementation Thread pool info :" + ctpe.toString());
// Get the thread pool executor from the HTable.
Table hTable =
ServerUtil.getHTableForCoprocessorScan(env, TableName.valueOf(fullTableName));
- if (hTable instanceof HTable) {
- HTable testTable = (HTable) hTable;
- Field props = testTable.getClass().getDeclaredField("pool");
- props.setAccessible(true);
- htpe = ((ThreadPoolExecutor) props.get(hTable));
- LOGGER.debug("HTable Thread pool info :" + htpe.toString());
- // Assert the HTable thread pool config match the Connection pool configs.
- // Since we are not overriding any defaults, it should match the defaults.
- assertEquals(htpe.getMaximumPoolSize(), DEFAULT_HCONNECTION_POOL_MAX_SIZE);
- assertEquals(htpe.getCorePoolSize(), DEFAULT_HCONNECTION_POOL_CORE_SIZE);
- LOGGER.debug("HTable threadpool info {}, {}, {}, {}", htpe.getCorePoolSize(),
- htpe.getMaximumPoolSize(), htpe.getQueue().remainingCapacity(),
- htpe.getKeepAliveTime(TimeUnit.SECONDS));
-
- int count = Thread.activeCount();
- Thread[] th = new Thread[count];
- // returns the number of threads put into the array
- Thread.enumerate(th);
- long hTablePoolCount =
- Arrays.stream(th).filter(s -> s.getName().equals("htable-pool-0")).count();
- // Assert no default HTable threadpools are created.
- assertEquals(0, hTablePoolCount);
- LOGGER.debug("htable-pool-0 threads {}", hTablePoolCount);
- }
+
+ props = hTable.getClass().getDeclaredField("pool");
+ props.setAccessible(true);
+ htpe = ((ThreadPoolExecutor) props.get(hTable));
+ LOGGER.debug("HTable Thread pool info :" + htpe.toString());
+ // Assert the HTable thread pool config match the Connection pool configs.
+ // Since we are not overriding any defaults, it should match the defaults.
+ assertEquals(htpe.getMaximumPoolSize(), DEFAULT_HCONNECTION_POOL_MAX_SIZE);
+ assertEquals(htpe.getCorePoolSize(), DEFAULT_HCONNECTION_POOL_CORE_SIZE);
+ LOGGER.debug("HTable threadpool info {}, {}, {}, {}", htpe.getCorePoolSize(),
+ htpe.getMaximumPoolSize(), htpe.getQueue().remainingCapacity(),
+ htpe.getKeepAliveTime(TimeUnit.SECONDS));
+
+ int count = Thread.activeCount();
+ Thread[] th = new Thread[count];
+ // returns the number of threads put into the array
+ Thread.enumerate(th);
+ long hTablePoolCount =
+ Arrays.stream(th).filter(s -> s.getName().equals("htable-pool-0")).count();
+ // Assert no default HTable threadpools are created.
+ assertEquals(0, hTablePoolCount);
+ LOGGER.debug("htable-pool-0 threads {}", hTablePoolCount);
+
// Assert that the threadpool from Connection and HTable are the same.
assertEquals(ctpe, htpe);
} catch (RuntimeException | NoSuchFieldException | IllegalAccessException | IOException t) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSEnabledIT.java
index f094ddb1b11..1b610adbefe 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSEnabledIT.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.util.SchemaUtil;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -73,8 +74,8 @@ public Object run() throws Exception {
@Test
public void testCreateMappedView() throws Throwable {
- final String schema = generateUniqueName();
- final String tableName = generateUniqueName();
+ final String schema = BaseTest.generateUniqueName();
+ final String tableName = BaseTest.generateUniqueName();
verifyAllowed(createSchema(schema), superUser1);
grantPermissions(regularUser1.getShortName(), schema, Permission.Action.WRITE,
Permission.Action.READ, Permission.Action.EXEC, Permission.Action.ADMIN);
@@ -100,7 +101,7 @@ public Void run() throws Exception {
@Test
public void testSchemaPermissions() throws Throwable {
grantSystemTableAccess();
- final String schemaName = "S_" + generateUniqueName();
+ final String schemaName = "S_" + BaseTest.generateUniqueName();
superUser1.runAs(new PrivilegedExceptionAction() {
@Override
public Void run() throws Exception {
@@ -165,10 +166,10 @@ public void testViewCreationFailsWhenNoExecPermsOnSystemChildLink() throws Throw
grantSystemTableAccess();
TableName systemChildLink = TableName.valueOf(SchemaUtil
.getPhysicalHBaseTableName(SYSTEM_SCHEMA_NAME, SYSTEM_CHILD_LINK_TABLE, true).getString());
- final String schemaName = "S_" + generateUniqueName();
- final String tableName = "T_" + generateUniqueName();
+ final String schemaName = "S_" + BaseTest.generateUniqueName();
+ final String tableName = "T_" + BaseTest.generateUniqueName();
final String fullTableName = schemaName + "." + tableName;
- final String viewName = "V_" + generateUniqueName();
+ final String viewName = "V_" + BaseTest.generateUniqueName();
verifyAllowed(createSchema(schemaName), superUser1);
verifyAllowed(createTable(fullTableName), superUser1);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
index 7ac79607ac7..03887fcc8c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -25,13 +25,14 @@
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.util.SchemaUtil;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -56,8 +57,8 @@ public void testPermissionsCachedWithAccessChecker() throws Throwable {
if (!isNamespaceMapped) {
return;
}
- final String schema = generateUniqueName();
- final String tableName = generateUniqueName();
+ final String schema = BaseTest.generateUniqueName();
+ final String tableName = BaseTest.generateUniqueName();
final String phoenixTableName = SchemaUtil.getTableName(schema, tableName);
try (Connection conn = getConnection()) {
grantPermissions(regularUser1.getShortName(), PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
@@ -82,9 +83,9 @@ public Void run() throws Exception {
}
});
verifyAllowed(createTable(phoenixTableName), regularUser1);
- HBaseTestingUtility utility = getUtility();
+ IntegrationTestingUtility utility = getUtility();
Configuration conf = utility.getConfiguration();
- ZKWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
+ ZKWatcher zkw = utility.getZooKeeperWatcher();
String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", "acl");
String aclZNode = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, aclZnodeParent);
String tableZNode = ZNodePaths.joinZNode(aclZNode, "@" + schema);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 7d839f4c16c..38f96d233a6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -961,7 +961,7 @@ public void testCreateViewOnExistingTable() throws Exception {
TableDescriptorBuilder builder =
TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
for (byte[] familyName : familyNames) {
- builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
+ builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
}
admin.createTable(builder.build());
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
index 81b84d8ac2e..e13ac958bb8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
@@ -32,7 +32,6 @@
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -141,8 +140,7 @@ public void testQueryTimeout() throws Exception {
public static class SleepingRegionObserver extends SimpleRegionObserver {
@Override
- public void preScannerClose(ObserverContext c, InternalScanner s)
- throws IOException {
+ public void preScannerClose(ObserverContext c, InternalScanner s) throws IOException {
try {
Thread.sleep(1200); // Wait long enough
} catch (InterruptedException e) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
index 6f4f16a27b8..d4549a8d71e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
@@ -29,7 +29,6 @@
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
@@ -81,13 +80,13 @@ public SleepingRegionObserver() {
}
@Override
- public boolean preScannerNext(final ObserverContext c,
- final InternalScanner s, final List results, final int limit, final boolean hasMore)
- throws IOException {
+ public boolean preScannerNext(final ObserverContext c, final InternalScanner s,
+ final List results, final int limit, final boolean hasMore) throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
try {
if (
- SLEEP_NOW && c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
- .equals(TABLE_NAME)
+ SLEEP_NOW
+ && env.getRegion().getRegionInfo().getTable().getNameAsString().equals(TABLE_NAME)
) {
Thread.sleep(2 * SCANNER_LEASE_TIMEOUT);
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
index ea352730bff..bf41fd646f2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogRollbackEnabledIT.java
@@ -32,7 +32,7 @@
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -91,7 +91,7 @@ private Connection getTenantConnection(String tenantId) throws SQLException {
return DriverManager.getConnection(getUrl(), tenantProps);
}
- private void assertNumRegions(HBaseTestingUtility testUtil, TableName tableName,
+ private void assertNumRegions(IntegrationTestingUtility testUtil, TableName tableName,
int expectedNumRegions) throws IOException {
RegionLocator rl = testUtil.getConnection().getRegionLocator(tableName);
assertEquals(expectedNumRegions, rl.getAllRegionLocations().size());
@@ -103,7 +103,7 @@ private void assertNumRegions(HBaseTestingUtility testUtil, TableName tableName,
*/
@Test
public void testSystemCatalogDoesNotSplit() throws Exception {
- HBaseTestingUtility testUtil = getUtility();
+ IntegrationTestingUtility testUtil = getUtility();
for (int i = 0; i < 10; i++) {
createTableAndTenantViews("schema" + i + ".table_" + i);
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
index 819feebcaa4..ecaa9bba291 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
@@ -681,7 +681,7 @@ private void startMiniClusterWithToggleNamespaceMapping(String isNamespaceMappin
* @return Phoenix connection string
*/
private String getJdbcUrl() {
- return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
+ return "jdbc:phoenix+zk:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
}
/**
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index c58946caae5..151faaacb0a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -41,7 +41,6 @@
import java.util.List;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -258,8 +257,7 @@ public void testCreateMappedViewWithHbaseNamespace() throws Exception {
Connection conn1 = DriverManager.getConnection(getUrl(), props);
conn1.setAutoCommit(true);
- HBaseTestingUtility testUtil = getUtility();
- Admin admin = testUtil.getAdmin();
+ Admin admin = getUtility().getAdmin();
String nameSpace = generateUniqueName();
admin.createNamespace(NamespaceDescriptor.create(nameSpace).build());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewMetadataIT.java
index b3f28a08e93..02ef0cbd02c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewMetadataIT.java
@@ -55,7 +55,6 @@
import java.util.List;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -173,8 +172,7 @@ public void testCreateViewFromHBaseTable() throws Exception {
TableDescriptorBuilder.newBuilder(TableName.valueOf(tableNameStr));
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyNameStr));
- HBaseTestingUtility testUtil = getUtility();
- Admin admin = testUtil.getAdmin();
+ Admin admin = getUtility().getAdmin();
admin.createTable(builder.build());
Connection conn = DriverManager.getConnection(getUrl());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseImmutableIndexIT.java
index aaeeea6d07f..dfc16132357 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseImmutableIndexIT.java
@@ -50,7 +50,6 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -476,16 +475,16 @@ public void testGlobalImmutableIndexDelete() throws Exception {
public static class DeleteFailingRegionObserver extends SimpleRegionObserver {
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
throw new DoNotRetryIOException();
}
}
public static class UpsertFailingRegionObserver extends SimpleRegionObserver {
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
throw new DoNotRetryIOException();
}
}
@@ -537,11 +536,10 @@ public void testCreateIndexDuringUpsertSelect() throws Exception {
// used to create an index while a batch of rows are being written
public static class CreateIndexRegionObserver extends SimpleRegionObserver {
@Override
- public void postPut(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c, Put put,
+ public void postPut(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Put put,
org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
- String tableName =
- c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
+ String tableName = env.getRegion().getRegionInfo().getTable().getNameAsString();
if (
tableName.equalsIgnoreCase(TABLE_NAME)
// create the index after the second batch
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexExtendedIT.java
index 326fcdd4d66..0aa52160dec 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexExtendedIT.java
@@ -31,13 +31,11 @@
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
@@ -125,8 +123,8 @@ public static class PreMutationFailingRegionObserver extends SimpleRegionObserve
implements FailingRegionObserver {
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
throw new IOException();
}
@@ -140,8 +138,8 @@ public static class PostMutationFailingRegionObserver extends SimpleRegionObserv
implements FailingRegionObserver {
@Override
- public void postBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void postBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
throw new IOException();
}
@@ -157,8 +155,8 @@ public static class FailOnceMutationRegionObserver extends SimpleRegionObserver
private boolean failOnce = true;
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
if (failOnce) {
// next attempt don't raise
failOnce = false;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
index 7429256362a..ef91619374e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
@@ -205,7 +205,6 @@ public static void downgradeCoprocs(String physicalTableName, String physicalInd
assertCoprocsContains(GlobalIndexChecker.class, indexDescriptor);
TableDescriptorBuilder indexDescBuilder = TableDescriptorBuilder.newBuilder(indexDescriptor);
- removeCoproc(IndexRegionObserver.class, indexDescBuilder, admin);
removeCoproc(GlobalIndexChecker.class, indexDescBuilder, admin);
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 13a514f3304..7a5b21e92f4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -618,15 +618,16 @@ public static class FailingRegionObserver extends SimpleRegionObserver {
public static final String FAIL_TABLE_NAME = "FAIL_TABLE";
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
boolean throwException = false;
if (FAIL_NEXT_WRITE) {
throwException = true;
FAIL_NEXT_WRITE = false;
} else if (
- c.getEnvironment().getRegionInfo().getTable().getNameAsString()
- .endsWith("A_" + FAIL_INDEX_NAME) && FAIL_WRITE
+ env.getRegionInfo().getTable().getNameAsString().endsWith("A_" + FAIL_INDEX_NAME)
+ && FAIL_WRITE
) {
throwException = true;
if (TOGGLE_FAIL_WRITE_FOR_RETRY) {
@@ -635,7 +636,7 @@ public void preBatchMutate(ObserverContext c,
} else {
// When local index updates are atomic with data updates, testing a write failure to a local
// index won't make sense.
- Mutation operation = miniBatchOp.getOperation(0);
+ Mutation operation = (Mutation) miniBatchOp.getOperation(0);
if (FAIL_WRITE) {
Map<byte[], List<Cell>> cellMap = operation.getFamilyCellMap();
for (Map.Entry<byte[], List<Cell>> entry : cellMap.entrySet()) {
@@ -643,7 +644,7 @@ public void preBatchMutate(ObserverContext c,
if (
Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)
) {
- int regionStartKeyLen = c.getEnvironment().getRegionInfo().getStartKey().length;
+ int regionStartKeyLen = env.getRegionInfo().getStartKey().length;
Cell firstCell = entry.getValue().get(0);
long indexId =
MetaDataUtil.getViewIndexIdDataType().getCodec().decodeLong(firstCell.getRowArray(),
@@ -665,7 +666,7 @@ public void preBatchMutate(ObserverContext c,
}
}
- private void dropIndex(ObserverContext c) {
+ private void dropIndex(ObserverContext c) {
try {
Connection connection = QueryUtil.getConnection(c.getEnvironment().getConfiguration());
connection.createStatement()
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
index 98437c9967d..9a5a8460da2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
@@ -36,7 +36,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -169,11 +168,12 @@ public static class WriteFailingRegionObserver extends SimpleRegionObserver {
public static volatile AtomicInteger attempts = new AtomicInteger(0);
@Override
- public void postBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void postBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
attempts.incrementAndGet();
- throw new DoNotRetryIOException("Simulating write failure on "
- + c.getEnvironment().getRegionInfo().getTable().getNameAsString());
+ throw new DoNotRetryIOException(
+ "Simulating write failure on " + env.getRegionInfo().getTable().getNameAsString());
}
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 2af3f5f5988..31bf73b69be 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -34,8 +34,8 @@
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -97,8 +97,8 @@ public class MutableIndexReplicationIT extends BaseTest {
protected static Admin admin;
- protected static HBaseTestingUtility utility1;
- protected static HBaseTestingUtility utility2;
+ protected static IntegrationTestingUtility utility1;
+ protected static IntegrationTestingUtility utility2;
protected static final int REPLICATION_RETRIES = 100;
protected static final byte[] tableName = Bytes.toBytes("test");
@@ -128,7 +128,7 @@ private static void setupConfigsAndStartCluster() throws Exception {
conf1.setInt("replication.stats.thread.period.seconds", 5);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
- utility1 = new HBaseTestingUtility(conf1);
+ utility1 = new IntegrationTestingUtility(conf1);
utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster();
// Have to reset conf1 in case zk cluster location different
@@ -145,7 +145,7 @@ private static void setupConfigsAndStartCluster() throws Exception {
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
- utility2 = new HBaseTestingUtility(conf2);
+ utility2 = new IntegrationTestingUtility(conf2);
utility2.setZkCluster(miniZK);
zkw2 = new ZKWatcher(conf2, "cluster2", null, true);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
index 2bdfa0898a7..1e361dbd998 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
@@ -31,12 +31,14 @@
import java.util.Collection;
import java.util.List;
import java.util.Properties;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.jdbc.ConnectionInfo;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
import org.apache.phoenix.util.ByteUtil;
@@ -59,8 +61,13 @@ public MutableIndexSplitIT(boolean localIndex, boolean multiTenant) {
private static Connection getConnection(Properties props) throws SQLException {
props.setProperty(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(1));
- Connection conn = DriverManager.getConnection(getUrl(), props);
- return conn;
+ props.setProperty(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, Integer.toString(1));
+ ConnectionInfo info = ConnectionInfo.create(getUrl(), null, props);
+ // HBase 3 Async implementation pre-caches scanner results in the background, which
+ // would break this test.
+ // Using a distinct principal ("nocache") makes sure that a new CQSI is created with
+ // HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY set, so the scanner cache is just 2 rows.
+ return DriverManager.getConnection(info.withPrincipal("nocache").toUrl(), props);
}
@Parameters(name = "MutableIndexSplitIT_localIndex={0},multiTenant={1}") // name is used by
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 92dd1b9b938..6e52814c199 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -1143,8 +1142,9 @@ public void testIndexFailureWithinRSDoesnotDisablesIndex() throws Throwable {
public static class WriteFailingRegionObserver extends SimpleRegionObserver {
@Override
- public void postBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
+ public void postBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
// we need to advance the clock, since the index retry logic (copied from HBase) has a time
// component
EnvironmentEdge delegate = EnvironmentEdgeManager.getDelegate();
@@ -1152,8 +1152,8 @@ public void postBatchMutate(ObserverContext c,
MyClock myClock = (MyClock) delegate;
myClock.time += 1000;
}
- throw new DoNotRetryIOException("Simulating write failure on "
- + c.getEnvironment().getRegionInfo().getTable().getNameAsString());
+ throw new DoNotRetryIOException(
+ "Simulating write failure on " + env.getRegionInfo().getTable().getNameAsString());
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReplicationWithWALAnnotationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReplicationWithWALAnnotationIT.java
index 3293a9525aa..21d4f88aefd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReplicationWithWALAnnotationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReplicationWithWALAnnotationIT.java
@@ -33,8 +33,8 @@
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -46,7 +46,6 @@
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -108,8 +107,8 @@ public class ReplicationWithWALAnnotationIT extends BaseTest {
protected static ZKWatcher zkw1;
protected static ZKWatcher zkw2;
- protected static HBaseTestingUtility utility1;
- protected static HBaseTestingUtility utility2;
+ protected static IntegrationTestingUtility utility1;
+ protected static IntegrationTestingUtility utility2;
protected static final int REPLICATION_RETRIES = 10;
protected static final byte[] tableName = Bytes.toBytes("test");
@@ -153,7 +152,7 @@ private static void setupConfigsAndStartCluster() throws Exception {
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
setUpConfigForMiniCluster(conf1);
- utility1 = new HBaseTestingUtility(conf1);
+ utility1 = new IntegrationTestingUtility(conf1);
utility1.startMiniZKCluster();
conf1 = utility1.getConfiguration();
@@ -166,7 +165,7 @@ private static void setupConfigsAndStartCluster() throws Exception {
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
- utility2 = new HBaseTestingUtility(conf2);
+ utility2 = new IntegrationTestingUtility(conf2);
utility2.startMiniZKCluster();
zkw2 = new ZKWatcher(conf2, "cluster2", null, true);
@@ -510,16 +509,17 @@ public Optional getRegionObserver() {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
- String tenantId = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()));
- String schemaName = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()));
- String logicalTableName = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()));
- String tableType = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()));
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ Mutation m = (Mutation) miniBatchOp.getOperation(0);
+ String tenantId =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()));
+ String schemaName =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()));
+ String logicalTableName = Bytes
+ .toString(m.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()));
+ String tableType =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()));
LOGGER.info(
"TestCoprocessorForWALAnnotationAtSink preBatchMutate: tenantId: {}, schemaName: {}, "
@@ -546,16 +546,18 @@ public Optional getRegionObserver() {
}
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws IOException {
- String tenantId = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()));
- String schemaName = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()));
- String logicalTableName = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()));
- String tableType = Bytes.toString(miniBatchOp.getOperation(0)
- .getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()));
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws IOException {
+ Mutation m = (Mutation) miniBatchOp.getOperation(0);
+
+ String tenantId =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()));
+ String schemaName =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()));
+ String logicalTableName = Bytes
+ .toString(m.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()));
+ String tableType =
+ Bytes.toString(m.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()));
LOGGER.info(
"TestCoprocessorForWALAnnotationAtSink preBatchMutate: tenantId: {}, schemaName: {}, "
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScanner2IT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScanner2IT.java
index a008150d6b6..0c7f00f81a0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScanner2IT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScanner2IT.java
@@ -53,7 +53,6 @@
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
@@ -1063,8 +1062,7 @@ public static void resetCount() {
}
@Override
- public void preScannerOpen(final ObserverContext c,
- final Scan scan) {
+ public void preScannerOpen(final ObserverContext c, final Scan scan) {
if (scan.getFilter() instanceof SkipScanFilter) {
List<List<KeyRange>> slots = ((SkipScanFilter) scan.getFilter()).getSlots();
for (List<KeyRange> ranges : slots) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScannerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScannerIT.java
index 641fcaa2eaf..5b7b69900ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScannerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/UncoveredGlobalIndexRegionScannerIT.java
@@ -38,7 +38,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.exception.PhoenixParserException;
@@ -898,8 +897,7 @@ public static void resetCount() {
}
@Override
- public void preScannerOpen(final ObserverContext c,
- final Scan scan) {
+ public void preScannerOpen(final ObserverContext c, final Scan scan) {
if (scan.getFilter() instanceof SkipScanFilter) {
List> slots = ((SkipScanFilter) scan.getFilter()).getSlots();
for (List ranges : slots) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
index 659f1b01ef1..ada18823945 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
@@ -189,8 +188,7 @@ private void helpTestWriteFailure(boolean indexTableWriteFailure) throws SQLExce
public static class FailingRegionObserver extends SimpleRegionObserver {
@Override
- public void prePut(
- org.apache.hadoop.hbase.coprocessor.ObserverContext c, Put put,
+ public void prePut(org.apache.hadoop.hbase.coprocessor.ObserverContext c, Put put,
org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
if (shouldFailUpsert(c, put)) {
// throwing anything other than instances of IOException result
@@ -201,7 +199,7 @@ public void prePut(
}
}
- private boolean shouldFailUpsert(ObserverContext c, Put put) {
+ private boolean shouldFailUpsert(ObserverContext c, Put put) {
return Bytes.contains(put.getRow(), Bytes.toBytes(ROW_TO_FAIL));
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinCacheIT.java
index 076d7c0f487..220efe33a95 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinCacheIT.java
@@ -89,11 +89,11 @@ public static class InvalidateHashCache extends SimpleRegionObserver {
public static List lastRemovedJoinIds = new ArrayList();
@Override
- public void preScannerOpen(final ObserverContext c,
- final Scan scan) {
+ public void preScannerOpen(final ObserverContext c, final Scan scan) {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
final HashJoinInfo joinInfo = HashJoinInfo.deserializeHashJoinFromScan(scan);
if (joinInfo != null) {
- TenantCache cache = GlobalCache.getTenantCache(c.getEnvironment(), null);
+ TenantCache cache = GlobalCache.getTenantCache(env, null);
int count = joinInfo.getJoinIds().length;
for (int i = 0; i < count; i++) {
ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i];
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 8342ffc663d..b6be499e458 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -381,8 +381,8 @@ protected MutationState newMutationState(int maxSize, long maxSizeBytes) {
public static class FailingRegionObserver extends SimpleRegionObserver {
@Override
- public void prePut(ObserverContext c, Put put, WALEdit edit,
- final Durability durability) throws HBaseIOException {
+ public void prePut(ObserverContext c, Put put, WALEdit edit, final Durability durability)
+ throws HBaseIOException {
if (shouldFail(c, put)) {
// throwing anything other than instances of IOException result
// in this coprocessor being unloaded
@@ -393,8 +393,8 @@ public void prePut(ObserverContext c, Put put, WAL
}
@Override
- public void preDelete(ObserverContext c, Delete delete,
- WALEdit edit, Durability durability) throws IOException {
+ public void preDelete(ObserverContext c, Delete delete, WALEdit edit, Durability durability)
+ throws IOException {
if (shouldFail(c, delete)) {
// throwing anything other than instances of IOException result
// in this coprocessor being unloaded
@@ -404,9 +404,9 @@ public void preDelete(ObserverContext c, Delete de
}
}
- private boolean shouldFail(ObserverContext c, Mutation m) {
- String tableName =
- c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
+ private boolean shouldFail(ObserverContext c, Mutation m) {
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
+ String tableName = env.getRegion().getRegionInfo().getTable().getNameAsString();
// deletes on transactional tables are converted to put, so use a single helper method
return tableName.contains(TABLE_NAME_TO_FAIL)
&& (Bytes.equals(ROW_TO_FAIL_UPSERT_BYTES, m.getRow())
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index 89374edb179..5c79c46e0fb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -36,14 +36,13 @@
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -211,7 +210,7 @@ public void testSplitDuringUpsertSelect() throws Exception {
}
// keep trying to split the region
- final HBaseTestingUtility utility = getUtility();
+ final IntegrationTestingUtility utility = getUtility();
final Admin admin = utility.getAdmin();
final TableName dataTN = TableName.valueOf(dataTable);
assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
@@ -263,7 +262,7 @@ public void testRegionCloseDuringUpsertSelect() throws Exception {
Thread.sleep(300);
}
- final HBaseTestingUtility utility = getUtility();
+ final IntegrationTestingUtility utility = getUtility();
// try to close the region while UPSERT SELECTs are happening,
final HRegionServer dataRs = utility.getHBaseCluster().getRegionServer(0);
final Admin admin = utility.getAdmin();
@@ -296,12 +295,13 @@ public static class SlowBatchRegionObserver extends SimpleRegionObserver {
public static volatile boolean SLOW_MUTATE = false;
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws HBaseIOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws HBaseIOException {
// model a slow batch that takes a long time
+ RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) c.getEnvironment();
if (
(miniBatchOp.size() == 100 || SLOW_MUTATE)
- && c.getEnvironment().getRegionInfo().getTable().getNameAsString().equals(dataTable)
+ && env.getRegionInfo().getTable().getNameAsString().equals(dataTable)
) {
try {
Thread.sleep(6000);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/HighAvailabilityTestingUtility.java b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/HighAvailabilityTestingUtility.java
index f87d57cbb92..213cd880251 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/HighAvailabilityTestingUtility.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/HighAvailabilityTestingUtility.java
@@ -54,6 +54,7 @@
import org.apache.curator.framework.CuratorFramework;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
@@ -663,7 +664,6 @@ private static void setUpDefaultHBaseConfig(Configuration conf) {
*/
conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5);
conf.setInt("hbase.regionserver.metahandler.count", 2);
- conf.setInt(HConstants.MASTER_HANDLER_COUNT, 2);
conf.setInt("dfs.namenode.handler.count", 2);
conf.setInt("dfs.namenode.service.handler.count", 2);
conf.setInt("dfs.datanode.handler.count", 2);
@@ -730,7 +730,7 @@ public static void doTestBasicOperationsWithStatement(Connection conn, Statement
public static HighAvailabilityGroup getHighAvailibilityGroup(String jdbcUrl,
Properties clientProperties) throws TimeoutException, InterruptedException {
AtomicReference haGroupRef = new AtomicReference<>();
- GenericTestUtils.waitFor(() -> {
+ Waiter.waitFor(HBaseConfiguration.create(), 180_000, 1000, () -> {
try {
Optional haGroup =
HighAvailabilityGroup.get(jdbcUrl, clientProperties);
@@ -742,7 +742,7 @@ public static HighAvailabilityGroup getHighAvailibilityGroup(String jdbcUrl,
} catch (SQLException throwables) {
return false;
}
- }, 1_000, 180_000);
+ });
return haGroupRef.get();
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetIT.java b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetIT.java
index d14f00ae8ad..32c46437924 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetIT.java
@@ -35,7 +35,7 @@
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.HighAvailabilityTestingUtility.HBaseTestingUtilityPair;
@@ -87,10 +87,10 @@ public static void setUpBeforeClass() throws Exception {
LOG.info("Initialized haGroup {} with URL {}", haGroup.getGroupInfo().getName(), jdbcUrl);
CLUSTERS.createTableOnClusterPair(haGroup, tableName, false);
// Disable replication from cluster 1 to cluster 2
- ReplicationAdmin admin = new ReplicationAdmin(CLUSTERS.getHBaseCluster1().getConfiguration());
- admin.removePeer(PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT);
- ReplicationAdmin admin2 = new ReplicationAdmin(CLUSTERS.getHBaseCluster2().getConfiguration());
- admin2.removePeer(PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT);
+ Admin admin = CLUSTERS.getHBaseCluster1().getAdmin();
+ admin.removeReplicationPeer(PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT);
+ Admin admin2 = CLUSTERS.getHBaseCluster2().getAdmin();
+ admin2.removeReplicationPeer(PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT);
}
@AfterClass
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolIT.java
index 44c7700da8e..6b3c37dddd1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolIT.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.jdbc;
-import static org.apache.hadoop.hbase.GenericTestUtils.waitFor;
+import static org.apache.hadoop.hbase.Waiter.waitFor;
import static org.apache.phoenix.jdbc.PhoenixHAAdminTool.RET_REPAIR_FOUND_INCONSISTENCIES;
import static org.apache.phoenix.jdbc.PhoenixHAAdminTool.RET_SUCCESS;
import static org.apache.phoenix.jdbc.PhoenixHAAdminTool.RET_SYNC_ERROR;
@@ -282,7 +282,7 @@ private static void doVerifyClusterRole(ClusterRoleRecord clusterRoleRecord) thr
*/
private static void doVerifyClusterRole(CuratorFramework curator,
ClusterRoleRecord clusterRoleRecord) throws Exception {
- waitFor(() -> {
+ waitFor(CLUSTERS.getHBaseCluster1().getConfiguration(), 15_000, 1_000, () -> {
try {
String path = ZKPaths.PATH_SEPARATOR + clusterRoleRecord.getHaGroupName();
byte[] data = curator.getData().forPath(path);
@@ -292,7 +292,7 @@ private static void doVerifyClusterRole(CuratorFramework curator,
LOG.info("Got exception while waiting for znode is up to date: {}", e.getMessage());
return false;
}
- }, 1_000, 15_000);
+ });
}
private static CuratorFramework getCurator1() throws IOException {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionQueryServicesImplThreadPoolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionQueryServicesImplThreadPoolIT.java
index 1bf260f461c..ba3b29df3e5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionQueryServicesImplThreadPoolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionQueryServicesImplThreadPoolIT.java
@@ -26,7 +26,6 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
import java.lang.reflect.Field;
import java.sql.Connection;
@@ -39,8 +38,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.end2end.ServerMetadataCacheTestImpl;
@@ -142,17 +139,15 @@ private String connUrlWithPrincipal(String principalName) throws SQLException {
@Test
public void checkHTableThreadPoolExecutorSame() throws Exception {
Table table = createCQSI(null).getTable(tableName.getBytes());
- assertTrue(table instanceof HTable);
- HTable hTable = (HTable) table;
- Field props = hTable.getClass().getDeclaredField("pool");
+ Field props = table.getClass().getDeclaredField("pool");
props.setAccessible(true);
- validateThreadPoolExecutor((ThreadPoolExecutor) props.get(hTable));
+ validateThreadPoolExecutor((ThreadPoolExecutor) props.get(table));
}
@Test
public void checkHConnectionThreadPoolExecutorSame() throws Exception {
// Extract Conn1 instance from CQSI1
- ClusterConnection conn1 = extractConnectionFromCQSI(createCQSI("hello"));
+ Connection conn1 = extractConnectionFromCQSI(createCQSI("hello"));
// Extract batchPool from connection in CQSI1
ThreadPoolExecutor threadPoolExecutor1FromConnection = extractBatchPool(conn1);
// Create another CQSI2
@@ -160,7 +155,7 @@ public void checkHConnectionThreadPoolExecutorSame() throws Exception {
// Extract the ThreadPoolExecutor from CQSI2 instance
ThreadPoolExecutor threadPoolExecutor2 = extractThreadPoolExecutorFromCQSI(connQueryServices2);
// Extract Conn2 from CQSI2
- ClusterConnection conn2 = extractConnectionFromCQSI(createCQSI("bye"));
+ Connection conn2 = extractConnectionFromCQSI(createCQSI("bye"));
// Extract batchPool from connection2 in CQSI2
ThreadPoolExecutor threadPoolExecutor2FromConnection = extractBatchPool(conn2);
// Check if ThreadPoolExecutor2 from CQSI and from Connection are Same
@@ -174,7 +169,7 @@ public void checkHConnectionThreadPoolExecutorSame() throws Exception {
validateThreadPoolExecutor(threadPoolExecutor2);
}
- private static ThreadPoolExecutor extractBatchPool(ClusterConnection conn)
+ private static ThreadPoolExecutor extractBatchPool(Connection conn)
throws NoSuchFieldException, IllegalAccessException {
Field batchPoolField = conn.getClass().getDeclaredField("batchPool");
batchPoolField.setAccessible(true);
@@ -284,10 +279,10 @@ private void createTable(String tableName) throws SQLException {
}
}
- private ClusterConnection extractConnectionFromCQSI(ConnectionQueryServices cqsi)
+ private Connection extractConnectionFromCQSI(ConnectionQueryServices cqsi)
throws NoSuchFieldException, IllegalAccessException {
Field connectionField1 = cqsi.getClass().getDeclaredField("connection");
connectionField1.setAccessible(true);
- return (ClusterConnection) connectionField1.get(cqsi);
+ return (Connection) connectionField1.get(cqsi);
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 2c1752ba028..f93583a8159 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -32,8 +32,7 @@
import java.util.List;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -209,9 +208,8 @@ private void ensureTablesOnDifferentRegionServers(String tableName1, String tabl
byte[] table1 = Bytes.toBytes(tableName1);
byte[] table2 = Bytes.toBytes(tableName2);
Admin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
- HBaseTestingUtility util = getUtility();
- MiniHBaseCluster cluster = util.getHBaseCluster();
- HMaster master = cluster.getMaster();
+ IntegrationTestingUtility util = getUtility();
+ HMaster master = util.getHBaseCluster().getMaster();
AssignmentManager am = master.getAssignmentManager();
// verify there is only a single region for data table
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 3b3304243a1..2ceef7676cf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -47,10 +47,8 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
@@ -135,8 +133,8 @@ public void testFailureToRollbackAfterDelete() throws Exception {
public static class WriteFailingRegionObserver extends SimpleRegionObserver {
@Override
- public void preBatchMutate(ObserverContext c,
- MiniBatchOperationInProgress miniBatchOp) throws HBaseIOException {
+ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp)
+ throws HBaseIOException {
throw new DoNotRetryIOException();
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
index 5d5af556101..621c8ff6541 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
@@ -26,7 +26,6 @@
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -92,8 +91,7 @@ public void testCachedConnections() throws Exception {
final Connection conn = DriverManager.getConnection(getUrl());
final Admin admin = getUtility().getAdmin();
- final MiniHBaseCluster cluster = getUtility().getHBaseCluster();
- final HRegionServer regionServer = cluster.getRegionServer(0);
+ final HRegionServer regionServer = getUtility().getHBaseCluster().getRegionServer(0);
Configuration conf = admin.getConfiguration();
final int noOfOrgs = 20;
final AtomicBoolean flag = new AtomicBoolean();
diff --git a/phoenix-core/src/it/resources/compatible_client_versions.json b/phoenix-core/src/it/resources/compatible_client_versions.json
index e48386a44c3..009dd7a549f 100644
--- a/phoenix-core/src/it/resources/compatible_client_versions.json
+++ b/phoenix-core/src/it/resources/compatible_client_versions.json
@@ -46,5 +46,6 @@
"version": "2.18.0",
"groupId": "org.apache.logging.log4j"
}
- ]
+ ],
+ "3.0": []
}
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
index c7052960829..08f443d406c 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
@@ -138,7 +139,7 @@ private void writeWALEdit(WALCellCodec codec, List kvs, FSDataOutputStream
Codec.Encoder cellEncoder = codec.getEncoder(out);
// We interleave the two lists for code simplicity
for (Cell kv : kvs) {
- cellEncoder.write(kv);
+ cellEncoder.write((ExtendedCell) kv);
}
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
index ed1934c9919..945a0cde2c7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.regionserver.OnlineRegions;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.CompatRegionCoprocessorEnvironment;
import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos;
import org.apache.phoenix.protobuf.ProtobufUtil;
@@ -70,7 +71,7 @@ public class TaskMetaDataEndpointTest {
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
configuration = new Configuration();
- RegionCoprocessorEnvironment environment = new RegionCoprocessorEnvironment() {
+ RegionCoprocessorEnvironment environment = new CompatRegionCoprocessorEnvironment() {
@Override
public Region getRegion() {
@@ -151,6 +152,7 @@ public Configuration getConfiguration() {
public ClassLoader getClassLoader() {
return null;
}
+
};
taskMetaDataEndpoint = new TaskMetaDataEndpoint();
taskMetaDataEndpoint.start(environment);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
index d7b34234d70..44470f4734b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
@@ -25,6 +25,7 @@
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -181,7 +182,7 @@ public void testCorrectRollback() throws Exception {
LocalTableState table = new LocalTableState(cachedLocalTable, m);
// add the kvs from the mutation
- KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
+ KeyValue kv = KeyValueUtil.ensureKeyValue((ExtendedCell) m.get(fam, qual).get(0));
kv.setSequenceId(0);
table.addPendingUpdates(kv);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
index bf8c57d8f20..6d4232f8c52 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
@@ -158,7 +158,7 @@ public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) {
return new BaseRegionScanner(Mockito.mock(RegionScanner.class)) {
@Override
- public boolean next(List results) throws IOException {
+ public boolean next(List results) throws IOException {
for (Cell cell : currentRowCells) {
if (
cell.getTimestamp() >= timeRange.getMin() && cell.getTimestamp() < timeRange.getMax()
@@ -169,7 +169,7 @@ public boolean next(List results) throws IOException {
return false; // indicate no more results
}
- public boolean next(List result, ScannerContext scannerContext) throws IOException {
+ public boolean next(List result, ScannerContext scannerContext) throws IOException {
return next(result);
}
};
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 2b916b65098..afba6d46831 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -23,6 +23,7 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
@@ -36,6 +37,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -48,7 +50,6 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -112,10 +113,9 @@ public Optional getRegionObserver() {
return Optional.of(this);
}
- @Override
- public void preWALRestore(
- org.apache.hadoop.hbase.coprocessor.ObserverContext<
- ? extends RegionCoprocessorEnvironment> ctx,
+ // FIXME: this hook does not exist in HBase 3.
+ // Should this test be skipped on HBase 3?
+ public void preWALRestore(org.apache.hadoop.hbase.coprocessor.ObserverContext ctx,
org.apache.hadoop.hbase.client.RegionInfo info, WALKey logKey,
org.apache.hadoop.hbase.wal.WALEdit logEdit) throws IOException {
try {
@@ -170,7 +170,7 @@ public void testWaitsOnIndexRegionToReload() throws Exception {
// start the cluster with 2 rs
util.startMiniCluster(2);
- Admin admin = util.getHBaseAdmin();
+ Admin admin = util.getAdmin();
// setup the index
byte[] family = Bytes.toBytes("family");
byte[] qual = Bytes.toBytes("qualifier");
@@ -184,8 +184,10 @@ public void testWaitsOnIndexRegionToReload() throws Exception {
// create the primary table w/ indexing enabled
TableDescriptor primaryTable =
TableDescriptorBuilder.newBuilder(TableName.valueOf(testTable.getTableName()))
- .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
- .addColumnFamily(ColumnFamilyDescriptorBuilder.of(nonIndexedFamily)).build();
+ .setColumnFamilies(
+ Arrays.asList(new ColumnFamilyDescriptor[] { ColumnFamilyDescriptorBuilder.of(family),
+ ColumnFamilyDescriptorBuilder.of(nonIndexedFamily) }))
+ .build();
builder.addArbitraryConfigForTesting(Indexer.RecoveryFailurePolicyKeyForTesting,
ReleaseLatchOnFailurePolicy.class.getName());
builder.build(primaryTable);
@@ -194,7 +196,7 @@ public void testWaitsOnIndexRegionToReload() throws Exception {
// create the index table
TableDescriptorBuilder indexTableBuilder =
TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(getIndexTableName())))
- .addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
+ .setCoprocessor(IndexTableBlockingReplayObserver.class.getName());
TestIndexManagementUtil.createIndexTable(admin, indexTableBuilder);
// figure out where our tables live
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index 20605359dc0..63fc02696f4 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -109,7 +109,7 @@ public void setUp() throws Exception {
new WALFactory(TEST_UTIL.getConfiguration(), getClass().getSimpleName());
wal = walFactory.getWAL(RegionInfoBuilder.newBuilder(TableName.valueOf("logs")).build());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
- .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build())
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build())
.build();
r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
index 79e6403a21a..07f1d200551 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
@@ -566,8 +566,16 @@ public void testZkQuorumConfigs() throws Exception {
assertEquals("127.23.45.678:7634,host123.48576:723,localhost:2181,v3:1",
props.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
- connectionInfo = ConnectionInfo.create("jdbc:phoenix:"
- + "localhost\\:2181,127.23.45.678\\:7634,v3\\:1,host123.48576\\:723:/hbase;" + "test=true",
+ connectionInfo = ConnectionInfo.create("jdbc:phoenix+rpc:"
+ + "localhost\\:2181,127.23.45.678\\:7634,v3\\:1,host123.48576\\:723::;" + "test=true", null,
+ null);
+ props = connectionInfo.asProps();
+ assertNull(props.get(HConstants.ZOOKEEPER_QUORUM));
+ assertNull(props.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
+
+ connectionInfo = ConnectionInfo.create(
+ "jdbc:phoenix+zk:"
+ + "localhost\\:2181,127.23.45.678\\:7634,v3\\:1,host123.48576\\:723:/hbase;" + "test=true",
null, null);
props = connectionInfo.asProps();
assertEquals("127.23.45.678:7634,host123.48576:723,localhost:2181,v3:1",
@@ -576,19 +584,13 @@ public void testZkQuorumConfigs() throws Exception {
props.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
connectionInfo = ConnectionInfo.create(
- "jdbc:phoenix:" + "localhost,v3,127.23.45.678,host987:12345:/hbase;" + "test=true", null,
+ "jdbc:phoenix+zk:" + "localhost,v3,127.23.45.678,host987:12345:/hbase;" + "test=true", null,
null);
props = connectionInfo.asProps();
assertEquals("127.23.45.678:12345,host987:12345,localhost:12345,v3:12345",
props.get(HConstants.ZOOKEEPER_QUORUM));
assertEquals("127.23.45.678:12345,host987:12345,localhost:12345,v3:12345",
props.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
- assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") >= 0);
- connectionInfo = ConnectionInfo.create("jdbc:phoenix+rpc:"
- + "localhost\\:2181,127.23.45.678\\:7634,v3\\:1,host123.48576\\:723::;" + "test=true", null,
- null);
- props = connectionInfo.asProps();
- assertNull(props.get(HConstants.ZOOKEEPER_QUORUM));
- assertNull(props.get(HConstants.CLIENT_ZOOKEEPER_QUORUM));
+
}
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 7b14996fba4..35653dbcbc1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -118,13 +118,10 @@
import javax.annotation.Nonnull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -341,7 +338,10 @@ protected static String getZKClientPort(Configuration conf) {
protected static String url;
protected static PhoenixTestDriver driver;
protected static boolean clusterInitialized = false;
- protected static HBaseTestingUtility utility;
+ // We're using IntegrationTestingUtility because that extends HBaseTestingUtil/HBaseTestingUtility
+ // and works with either HBase 2/3. We could have also created a PhoenixTestingUtility in the
+  // compatibility module that does the same, and used that.
+ protected static IntegrationTestingUtility utility;
protected static final Configuration config = HBaseConfiguration.create();
protected static String getUrl() {
@@ -473,16 +473,11 @@ private static boolean isDistributedClusterModeEnabled(Configuration conf) {
private static synchronized String initMiniCluster(Configuration conf,
ReadOnlyProps overrideProps) throws Exception {
setUpConfigForMiniCluster(conf, overrideProps);
- utility = new HBaseTestingUtility(conf);
+ utility = new IntegrationTestingUtility(conf);
try {
long startTime = System.currentTimeMillis();
- StartMiniClusterOption.Builder builder = StartMiniClusterOption.builder();
- builder.numMasters(overrideProps.getInt(QueryServices.TESTS_MINI_CLUSTER_NUM_MASTERS, 1));
- int numSlaves =
- overrideProps.getInt(QueryServices.TESTS_MINI_CLUSTER_NUM_REGION_SERVERS, NUM_SLAVES_BASE);
- builder.numRegionServers(numSlaves);
- builder.numDataNodes(numSlaves);
- utility.startMiniCluster(builder.build());
+ utility.startMiniCluster(
+ overrideProps.getInt(QueryServices.TESTS_MINI_CLUSTER_NUM_REGION_SERVERS, NUM_SLAVES_BASE));
long startupTime = System.currentTimeMillis() - startTime;
LOGGER.info("HBase minicluster startup complete in {} ms", startupTime);
return getLocalClusterUrl(utility);
@@ -491,12 +486,12 @@ private static synchronized String initMiniCluster(Configuration conf,
}
}
- protected static String getLocalClusterUrl(HBaseTestingUtility util) throws Exception {
+ protected static String getLocalClusterUrl(IntegrationTestingUtility util) throws Exception {
String url = QueryUtil.getConnectionUrl(new Properties(), util.getConfiguration());
return url + PHOENIX_TEST_DRIVER_URL_PARAM;
}
- protected static String getLocalClusterUrl(HBaseTestingUtility util, String principal)
+ protected static String getLocalClusterUrl(IntegrationTestingUtility util, String principal)
throws Exception {
String url = QueryUtil.getConnectionUrl(new Properties(), util.getConfiguration(), principal);
return url + PHOENIX_TEST_DRIVER_URL_PARAM;
@@ -559,7 +554,10 @@ public static Configuration setUpConfigForMiniCluster(Configuration conf,
* really needed by phoenix for test purposes. Limiting these threads helps us in running
* several mini clusters at the same time without hitting the threads limit imposed by the OS.
*/
- conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5);
+ // This was set to 5, which worked with HBase 2, but HBase 3 runs out of handlers and tests
+ // hang even with 10 handlers.
+ // TODO is this expected and normal, or does this indicate a bug in HBase 3 or Phoenix ?
+ conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 20);
conf.setInt("hbase.regionserver.metahandler.count", 2);
conf.setInt("dfs.namenode.handler.count", 2);
conf.setInt("dfs.namenode.service.handler.count", 2);
@@ -594,7 +592,7 @@ public static Configuration setUpConfigForMiniCluster(Configuration conf,
* PhoenixRegionServerEndpoint by default, if some other regionserver coprocs are not already
* present.
*/
- protected static void setPhoenixRegionServerEndpoint(Configuration conf) {
+ public static void setPhoenixRegionServerEndpoint(Configuration conf) {
String value = conf.get(REGIONSERVER_COPROCESSOR_CONF_KEY);
if (value == null) {
value = PhoenixRegionServerEndpointTestImpl.class.getName();
@@ -1713,7 +1711,7 @@ public static void assertValuesEqualsResultSet(ResultSet rs, List>
assertEquals(expectedCount, count);
}
- public static HBaseTestingUtility getUtility() {
+ public static IntegrationTestingUtility getUtility() {
return utility;
}
@@ -1919,9 +1917,8 @@ protected static void splitTable(TableName fullTableName, List splitPoin
assertTrue(
"Number of split points should be less than or equal to the number of region servers ",
splitPoints.size() <= NUM_SLAVES_BASE);
- HBaseTestingUtility util = getUtility();
- MiniHBaseCluster cluster = util.getHBaseCluster();
- HMaster master = cluster.getMaster();
+ IntegrationTestingUtility util = getUtility();
+ HMaster master = util.getHBaseCluster().getMaster();
// We don't want BalancerChore to undo our hard work
assertFalse("Balancer must be off", master.isBalancerOn());
AssignmentManager am = master.getAssignmentManager();
@@ -2010,9 +2007,8 @@ protected static void splitSystemCatalog(Map> tenantToTable
private static void moveRegion(RegionInfo regionInfo, ServerName srcServerName,
ServerName dstServerName) throws Exception {
Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
- HBaseTestingUtility util = getUtility();
- MiniHBaseCluster cluster = util.getHBaseCluster();
- HMaster master = cluster.getMaster();
+ IntegrationTestingUtility util = getUtility();
+ HMaster master = util.getHBaseCluster().getMaster();
AssignmentManager am = master.getAssignmentManager();
HRegionServer dstServer = util.getHBaseCluster().getRegionServer(dstServerName);
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
index 2aad2f5733b..6cebd53b4ba 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
@@ -43,9 +43,9 @@ public byte[] getValueArray() {
return delegate.getValueArray();
}
- @Override
+  // Removed from Cell in HBase 3
public byte getTypeByte() {
- return delegate.getTypeByte();
+ throw new UnsupportedOperationException();
}
@Override
@@ -53,14 +53,14 @@ public long getTimestamp() {
return delegate.getTimestamp();
}
- @Override
+  // Removed from Cell in HBase 3
public int getTagsOffset() {
- return delegate.getTagsOffset();
+ throw new UnsupportedOperationException();
}
- @Override
+  // Removed from Cell in HBase 3
public byte[] getTagsArray() {
- return delegate.getTagsArray();
+ throw new UnsupportedOperationException();
}
@Override
@@ -113,14 +113,14 @@ public String toString() {
return name;
}
- @Override
+  // Removed from Cell in HBase 3
public long getSequenceId() {
- return delegate.getSequenceId();
+ throw new UnsupportedOperationException();
}
- @Override
+  // Removed from Cell in HBase 3
public int getTagsLength() {
- return delegate.getTagsLength();
+ throw new UnsupportedOperationException();
}
@Override
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
index 656b0bccd10..fa6bd35afb6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
@@ -27,6 +27,7 @@
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.phoenix.jdbc.ClusterRoleRecord.RegistryType;
import org.apache.phoenix.query.QueryServices;
import org.junit.Test;
@@ -152,28 +153,28 @@ public void testGetMaxMutateBytes() throws Exception {
@Test
public void formatZookeeperUrlSameOrderTest() {
String zk1 = "zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase";
- String result = JDBCUtil.formatUrl(zk1);
+ String result = JDBCUtil.formatUrl(zk1, RegistryType.ZK);
assertEquals(zk1, result);
}
@Test
public void formatZookeeperUrlDifferentOrderTest() {
String zk1 = "zk3.net,zk2.net,zk1.net:2181:/hbase";
- String result = JDBCUtil.formatUrl(zk1);
+ String result = JDBCUtil.formatUrl(zk1, RegistryType.ZK);
assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase", result);
}
@Test
public void formatZookeeperUrlNoTrailersTest() {
String zk1 = "zk1.net,zk2.net,zk3.net";
- String result = JDBCUtil.formatUrl(zk1);
+ String result = JDBCUtil.formatUrl(zk1, RegistryType.ZK);
assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase", result);
}
@Test
public void formatZookeeperUrlToLowercaseTest() {
String zk1 = "MYHOST1.NET,MYHOST2.NET";
- String result = JDBCUtil.formatUrl(zk1);
+ String result = JDBCUtil.formatUrl(zk1, RegistryType.ZK);
assertEquals("myhost1.net\\:2181,myhost2.net\\:2181::/hbase", result);
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
index 9589d06ecf0..79b038553a8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
@@ -174,7 +174,7 @@ public void testTaggingAPutWrongQualifier() throws Exception {
MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, EMPTY_BYTE_ARRAY,
mockBuilder, EMPTY_BYTE_ARRAY, DUMMY_TAGS);
verify(mockBuilder, never()).setTags(Mockito.any(byte[].class));
- Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
+ ExtendedCell newCell = (ExtendedCell) put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
assertEquals(initialCell, newCell);
assertNull(TagUtil.carryForwardTags(newCell));
}
@@ -187,7 +187,7 @@ public void testTaggingAPutUnconditionally() throws Exception {
MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, mockBuilder,
null, DUMMY_TAGS);
verify(mockBuilder, times(1)).setTags(Mockito.any(byte[].class));
- Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
+ ExtendedCell newCell = (ExtendedCell) put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
assertEquals(mockCellWithTags, newCell);
}
@@ -200,7 +200,7 @@ public void testSkipTaggingAPutDueToSameCellValue() throws Exception {
MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, mockBuilder,
ORIGINAL_VALUE, DUMMY_TAGS);
verify(mockBuilder, never()).setTags(Mockito.any(byte[].class));
- Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
+ ExtendedCell newCell = (ExtendedCell) put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
assertEquals(initialCell, newCell);
assertNull(TagUtil.carryForwardTags(newCell));
}
@@ -322,7 +322,7 @@ public void testConditionallyAddTagsToPutCells() {
UPDATE_CACHE_FREQUENCY_BYTES, cellBuilder, PInteger.INSTANCE.toBytes(1),
VIEW_MODIFIED_PROPERTY_BYTES);
- Cell cell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
+ ExtendedCell cell = (ExtendedCell) put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0);
// To check the cell tag whether view has modified this property
assertTrue(
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6541dd7338e..7d37dfde370 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -65,9 +65,9 @@
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -834,12 +834,12 @@ public static void createMultiCFTestTable(Connection conn, String tableName, Str
conn.createStatement().execute(ddl);
}
- public static void flush(HBaseTestingUtility utility, TableName table) throws IOException {
+ public static void flush(IntegrationTestingUtility utility, TableName table) throws IOException {
Admin admin = utility.getAdmin();
admin.flush(table);
}
- public static void minorCompact(HBaseTestingUtility utility, TableName table)
+ public static void minorCompact(IntegrationTestingUtility utility, TableName table)
throws IOException, InterruptedException {
try {
CompactionScanner.setForceMinorCompaction(true);
@@ -858,7 +858,7 @@ public static void minorCompact(HBaseTestingUtility utility, TableName table)
}
}
- public static void majorCompact(HBaseTestingUtility utility, TableName table)
+ public static void majorCompact(IntegrationTestingUtility utility, TableName table)
throws IOException, InterruptedException {
long compactionRequestedSCN = EnvironmentEdgeManager.currentTimeMillis();
Admin admin = utility.getAdmin();
diff --git a/phoenix-core/src/test/resources/UrlTest.java b/phoenix-core/src/test/resources/UrlTest.java
new file mode 100644
index 00000000000..b7f0cd9e244
--- /dev/null
+++ b/phoenix-core/src/test/resources/UrlTest.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class UrlTest {
+  public static void main(String[] argv) {
+    String uri = "hbase:zk://host1:1111,host2:2222/hbase";
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..26e98c39d9f
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatDelegateRegionCoprocessorEnvironment
+ implements RegionCoprocessorEnvironment {
+ protected RegionCoprocessorEnvironment delegate;
+
+ public CompatDelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate) {
+ super();
+ this.delegate = delegate;
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
new file mode 100644
index 00000000000..ee111d02b6c
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
+
+public abstract class CompatDelegateRegionObserver implements RegionObserver {
+
+ protected final RegionObserver delegate;
+
+ public CompatDelegateRegionObserver(RegionObserver delegate) {
+ this.delegate = delegate;
+ }
+
+ // These are removed from HBase 3.0
+
+ @Override
+ public void preWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.preWALRestore(ctx, info, logKey, logEdit);
+ }
+
+ @Override
+ public void postWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.postWALRestore(ctx, info, logKey, logEdit);
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
index 58225bdd7a0..7c085422555 100644
--- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
@@ -30,8 +31,8 @@
public class CompatIndexHalfStoreFileReader extends StoreFileReader {
public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf,
- final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p)
- throws IOException {
+ final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p,
+ Reference r) throws IOException {
super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf);
}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
new file mode 100644
index 00000000000..d9911e53114
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+public abstract class CompatRPCControllerFactory extends RpcControllerFactory {
+
+ public CompatRPCControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(CellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ protected abstract HBaseRpcController getController(HBaseRpcController delegate);
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..97ba5b1f1e7
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment {
+
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index caba6f45c50..ca4a20f04de 100644
--- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -20,8 +20,19 @@
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -37,4 +48,49 @@ public static List getMergeRegions(Connection conn, RegionInfo regio
throws IOException {
return MetaTableAccessor.getMergeRegions(conn, regionInfo.getRegionName());
}
+
+ public static RegionInfo getRegionInfo(Result result) throws IOException {
+ return MetaTableAccessor.getRegionInfo(result);
+ }
+
+ public static Mutation toMutation(MutationProto mProto) throws IOException {
+ return ProtobufUtil.toMutation(mProto);
+ }
+
+ public static MutationProto toMutation(MutationType type, Mutation mutation) throws IOException {
+ return ProtobufUtil.toMutation(type, mutation);
+ }
+
+ public static TableProtos.TableName toProtoTableName(TableName tableName) {
+ return ProtobufUtil.toProtoTableName(tableName);
+ }
+
+ public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
+ return ProtobufUtil.toScan(proto);
+ }
+
+ public static ClientProtos.Scan toScan(Scan scan) throws IOException {
+ return ProtobufUtil.toScan(scan);
+ }
+
+ public static TableName toTableName(TableProtos.TableName tableNamePB) {
+ return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(),
+ tableNamePB.getQualifier().asReadOnlyByteBuffer());
+ }
+
+ public static void setMvccReadPoint(Scan scan, long mvccReadPoint) {
+ PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint);
+ }
+
+ public static long getMvccReadPoint(Scan scan) {
+ return PackagePrivateFieldAccessor.getMvccReadPoint(scan);
+ }
+
+ public static void closeAdminAndLog(Admin admin, Logger logger) {
+ try {
+ admin.close();
+ } catch (IOException e) {
+ logger.error("Closing the admin failed: ", e);
+ }
+ }
}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..26e98c39d9f
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatDelegateRegionCoprocessorEnvironment
+ implements RegionCoprocessorEnvironment {
+ protected RegionCoprocessorEnvironment delegate;
+
+ public CompatDelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate) {
+ super();
+ this.delegate = delegate;
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
new file mode 100644
index 00000000000..ee111d02b6c
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
+
+public abstract class CompatDelegateRegionObserver implements RegionObserver {
+
+ protected final RegionObserver delegate;
+
+ public CompatDelegateRegionObserver(RegionObserver delegate) {
+ this.delegate = delegate;
+ }
+
+ // These are removed from HBase 3.0
+
+ @Override
+ public void preWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.preWALRestore(ctx, info, logKey, logEdit);
+ }
+
+ @Override
+ public void postWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.postWALRestore(ctx, info, logKey, logEdit);
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
index 58225bdd7a0..7c085422555 100644
--- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
@@ -30,8 +31,8 @@
public class CompatIndexHalfStoreFileReader extends StoreFileReader {
public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf,
- final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p)
- throws IOException {
+ final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p,
+ Reference r) throws IOException {
super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf);
}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
new file mode 100644
index 00000000000..d9911e53114
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+public abstract class CompatRPCControllerFactory extends RpcControllerFactory {
+
+ public CompatRPCControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(CellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ protected abstract HBaseRpcController getController(HBaseRpcController delegate);
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..97ba5b1f1e7
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment {
+
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 38a940aa43e..2151cfca9cc 100644
--- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -20,8 +20,19 @@
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,4 +49,48 @@ public static List getMergeRegions(Connection conn, RegionInfo regio
return MetaTableAccessor.getMergeRegions(conn, regionInfo);
}
+ public static RegionInfo getRegionInfo(Result result) throws IOException {
+ return MetaTableAccessor.getRegionInfo(result);
+ }
+
+ public static Mutation toMutation(MutationProto mProto) throws IOException {
+ return ProtobufUtil.toMutation(mProto);
+ }
+
+ public static MutationProto toMutation(MutationType type, Mutation mutation) throws IOException {
+ return ProtobufUtil.toMutation(type, mutation);
+ }
+
+ public static TableProtos.TableName toProtoTableName(TableName tableName) {
+ return ProtobufUtil.toProtoTableName(tableName);
+ }
+
+ public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
+ return ProtobufUtil.toScan(proto);
+ }
+
+ public static ClientProtos.Scan toScan(Scan scan) throws IOException {
+ return ProtobufUtil.toScan(scan);
+ }
+
+ public static TableName toTableName(TableProtos.TableName tableNamePB) {
+ return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(),
+ tableNamePB.getQualifier().asReadOnlyByteBuffer());
+ }
+
+ public static void setMvccReadPoint(Scan scan, long mvccReadPoint) {
+ PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint);
+ }
+
+ public static long getMvccReadPoint(Scan scan) {
+ return PackagePrivateFieldAccessor.getMvccReadPoint(scan);
+ }
+
+ public static void closeAdminAndLog(Admin admin, Logger logger) {
+ try {
+ admin.close();
+ } catch (IOException e) {
+ logger.error("Closing the admin failed: ", e);
+ }
+ }
}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..52194e15970
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatDelegateRegionCoprocessorEnvironment
+ implements RegionCoprocessorEnvironment {
+ protected RegionCoprocessorEnvironment delegate;
+
+ public CompatDelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate) {
+ super();
+ this.delegate = delegate;
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
new file mode 100644
index 00000000000..ee111d02b6c
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
+
+public abstract class CompatDelegateRegionObserver implements RegionObserver {
+
+ protected final RegionObserver delegate;
+
+ public CompatDelegateRegionObserver(RegionObserver delegate) {
+ this.delegate = delegate;
+ }
+
+  // These methods were removed in HBase 3.0, so the delegation must live in this compat module.
+
+ @Override
+ public void preWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.preWALRestore(ctx, info, logKey, logEdit);
+ }
+
+ @Override
+ public void postWALRestore(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit)
+ throws IOException {
+ delegate.postWALRestore(ctx, info, logKey, logEdit);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
index ad39db49b93..74089af3d01 100644
--- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
@@ -30,8 +31,8 @@
public class CompatIndexHalfStoreFileReader extends StoreFileReader {
public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf,
- final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p)
- throws IOException {
+ final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p,
+ Reference r) throws IOException {
super(readerContext, hFileInfo, cacheConf, new StoreFileInfo(conf, fs, p, true), conf);
}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
new file mode 100644
index 00000000000..d9911e53114
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+public abstract class CompatRPCControllerFactory extends RpcControllerFactory {
+
+ public CompatRPCControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(CellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ protected abstract HBaseRpcController getController(HBaseRpcController delegate);
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..97ba5b1f1e7
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+
+public abstract class CompatRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment {
+
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 38a940aa43e..2151cfca9cc 100644
--- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -20,8 +20,19 @@
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,4 +49,48 @@ public static List getMergeRegions(Connection conn, RegionInfo regio
return MetaTableAccessor.getMergeRegions(conn, regionInfo);
}
+ public static RegionInfo getRegionInfo(Result result) throws IOException {
+ return MetaTableAccessor.getRegionInfo(result);
+ }
+
+ public static Mutation toMutation(MutationProto mProto) throws IOException {
+ return ProtobufUtil.toMutation(mProto);
+ }
+
+ public static MutationProto toMutation(MutationType type, Mutation mutation) throws IOException {
+ return ProtobufUtil.toMutation(type, mutation);
+ }
+
+ public static TableProtos.TableName toProtoTableName(TableName tableName) {
+ return ProtobufUtil.toProtoTableName(tableName);
+ }
+
+ public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
+ return ProtobufUtil.toScan(proto);
+ }
+
+ public static ClientProtos.Scan toScan(Scan scan) throws IOException {
+ return ProtobufUtil.toScan(scan);
+ }
+
+ public static TableName toTableName(TableProtos.TableName tableNamePB) {
+ return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(),
+ tableNamePB.getQualifier().asReadOnlyByteBuffer());
+ }
+
+ public static void setMvccReadPoint(Scan scan, long mvccReadPoint) {
+ PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint);
+ }
+
+ public static long getMvccReadPoint(Scan scan) {
+ return PackagePrivateFieldAccessor.getMvccReadPoint(scan);
+ }
+
+ public static void closeAdminAndLog(Admin admin, Logger logger) {
+ try {
+ admin.close();
+ } catch (IOException e) {
+ logger.error("Closing the admin failed: ", e);
+ }
+ }
}
diff --git a/phoenix-hbase-compat-3.0.0/pom.xml b/phoenix-hbase-compat-3.0.0/pom.xml
new file mode 100644
index 00000000000..25dfa0c6abe
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/pom.xml
@@ -0,0 +1,99 @@
+
+
+
+ 4.0.0
+
+ org.apache.phoenix
+ phoenix
+ 5.3.0-SNAPSHOT
+
+
+ phoenix-hbase-compat-3.0.0
+  Phoenix HBase 3.0.0 compatibility
+ Compatibility module for HBase 3.0.0+
+
+
+
+ 3.0.0-beta-2-SNAPSHOT
+
+
+
+
+
+ org.apache.hbase
+ hbase-client
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-common
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-server
+ ${hbase30.compat.version}
+ provided
+
+
+
+ org.apache.hbase
+ hbase-hadoop-compat
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-protocol-shaded
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-zookeeper
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-metrics
+ ${hbase30.compat.version}
+ provided
+
+
+ org.apache.hbase
+ hbase-metrics-api
+ ${hbase30.compat.version}
+ provided
+
+
+ org.slf4j
+ slf4j-api
+ provided
+
+
+
+ junit
+ junit
+ test
+
+
+
+
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java
new file mode 100644
index 00000000000..4a6e343a95a
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
+// This has a different signature in the HBase 2 and 3 modules.
+// This only comes together after the maven-replacer plugin relocates all protobuf code.
+public class ByteStringer {
+ public static ByteString wrap(final byte[] array) {
+ return UnsafeByteOperations.unsafeWrap(array);
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
new file mode 100644
index 00000000000..3eba96ca54b
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Table;
+
+public abstract class CompatDelegateHTable implements Table {
+
+ protected final Table delegate;
+
+ public CompatDelegateHTable(Table delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public RegionLocator getRegionLocator() throws IOException {
+ return delegate.getRegionLocator();
+ }
+
+ @Override
+ public Result mutateRow(RowMutations rm) throws IOException {
+ return delegate.mutateRow(rm);
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..8a295171197
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionCoprocessorEnvironment.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.quotas.OperationQuota;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+import org.apache.hadoop.hbase.quotas.RpcQuotaManager;
+import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
+import org.apache.hadoop.hbase.regionserver.Region;
+
+public abstract class CompatDelegateRegionCoprocessorEnvironment
+ implements RegionCoprocessorEnvironment {
+ protected RegionCoprocessorEnvironment delegate;
+
+ public CompatDelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate) {
+ super();
+ this.delegate = delegate;
+ }
+
+ @Override
+ public OperationQuota checkScanQuota(Scan scan, long maxBlockBytesScanned,
+ long prevBlockBytesScannedDifference) throws IOException, RpcThrottlingException {
+ return delegate.checkScanQuota(scan, maxBlockBytesScanned, prevBlockBytesScannedDifference);
+ }
+
+ @Override
+ public OperationQuota checkBatchQuota(Region region, int numWrites, int numReads)
+ throws IOException, RpcThrottlingException {
+ return delegate.checkBatchQuota(region, numWrites, numReads);
+ }
+
+ @Override
+ public OperationQuota checkBatchQuota(Region arg0, OperationType arg1)
+ throws IOException, RpcThrottlingException {
+ return delegate.checkBatchQuota(arg0, arg1);
+ }
+
+ @Override
+ public RpcQuotaManager getRpcQuotaManager() {
+ return delegate.getRpcQuotaManager();
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
new file mode 100644
index 00000000000..b7e2cf78512
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateRegionObserver.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+
+public abstract class CompatDelegateRegionObserver implements RegionObserver {
+
+ protected final RegionObserver delegate;
+
+ public CompatDelegateRegionObserver(RegionObserver delegate) {
+ this.delegate = delegate;
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
new file mode 100644
index 00000000000..b7907493fad
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.ReaderContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
+
+public class CompatIndexHalfStoreFileReader extends StoreFileReader {
+
+ public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf,
+ final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p,
+ Reference r) throws IOException {
+ super(readerContext, hFileInfo, cacheConf, new StoreFileInfo(conf, fs,
+ readerContext.getFileSize(), p, 0l, r, null, readerContext.isPrimaryReplicaReader()), conf);
+ }
+
+  // getScanner is private in HBase 3.0, so expose it here for Phoenix's half-store-file scanner.
+ public HFileScanner getCompatScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) {
+ return getScanner(cacheBlocks, pread, isCompaction);
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java
new file mode 100644
index 00000000000..c23b0be5d87
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufWALStreamReader;
+
+public abstract class CompatIndexedHLogReader extends ProtobufWALStreamReader {
+
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java
new file mode 100644
index 00000000000..66907a57ee8
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+
+public class CompatLocalIndexStoreFileScanner extends StoreFileScanner {
+
+ public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader,
+ boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder,
+ boolean canOptimizeForNonNullColumn) {
+ super(reader, reader.getCompatScanner(cacheBlocks, pread, isCompaction), !isCompaction,
+ reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn,
+ reader.getHFileReader().getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1);
+ }
+
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
new file mode 100644
index 00000000000..f878a30d292
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Table;
+
+public abstract class CompatOmidTransactionTable implements Table {
+
+ @Override
+ public RegionLocator getRegionLocator() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Result mutateRow(RowMutations rm) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java
new file mode 100644
index 00000000000..95660e365c9
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterBase;
+
+public abstract class CompatPagingFilter extends FilterBase {
+ protected Filter delegate = null;
+
+ public CompatPagingFilter(Filter delegate) {
+ this.delegate = delegate;
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
new file mode 100644
index 00000000000..a59843351f2
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+
+/**
+ * {@link RpcScheduler} that first checks to see if this is an index or metadata update before
+ * passing off the call to the delegate {@link RpcScheduler}.
+ */
+public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
+ protected RpcScheduler delegate;
+
+ @Override
+ public boolean dispatch(CallRunner task) {
+ try {
+ return compatDispatch(task);
+ } catch (Exception e) {
+ // This never happens with HBase 3.0
+ throw new RuntimeException(e);
+ }
+ }
+
+ public int getActiveRpcHandlerCount() {
+ return delegate.getActiveRpcHandlerCount();
+ }
+
+ @Override
+ public int getActiveBulkLoadRpcHandlerCount() {
+ return delegate.getActiveBulkLoadRpcHandlerCount();
+ }
+
+ @Override
+ public int getBulkLoadQueueLength() {
+ return delegate.getBulkLoadQueueLength();
+ }
+
+ public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException;
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
new file mode 100644
index 00000000000..d21b0c97fa2
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRPCControllerFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ExtendedCellScannable;
+import org.apache.hadoop.hbase.ExtendedCellScanner;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+public abstract class CompatRPCControllerFactory extends RpcControllerFactory {
+
+ public CompatRPCControllerFactory(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public HBaseRpcController newController() {
+ HBaseRpcController delegate = super.newController();
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(ExtendedCellScanner cellScanner) {
+ HBaseRpcController delegate = super.newController(cellScanner);
+ return getController(delegate);
+ }
+
+ @Override
+ public HBaseRpcController newController(List<ExtendedCellScannable> cellIterables) {
+ HBaseRpcController delegate = super.newController(cellIterables);
+ return getController(delegate);
+ }
+
+ protected abstract HBaseRpcController getController(HBaseRpcController delegate);
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
new file mode 100644
index 00000000000..1d73dde181b
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatRegionCoprocessorEnvironment.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.quotas.OperationQuota;
+import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
+import org.apache.hadoop.hbase.quotas.RpcQuotaManager;
+import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
+import org.apache.hadoop.hbase.regionserver.Region;
+
+public abstract class CompatRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment {
+
+ @Override
+ public OperationQuota checkBatchQuota(Region arg0, OperationType arg1)
+ throws IOException, RpcThrottlingException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public RpcQuotaManager getRpcQuotaManager() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public OperationQuota checkScanQuota(Scan scan, long maxBlockBytesScanned,
+ long prevBlockBytesScannedDifference) throws IOException, RpcThrottlingException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public OperationQuota checkBatchQuota(Region region, int numWrites, int numReads)
+ throws IOException, RpcThrottlingException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
new file mode 100644
index 00000000000..8957f509607
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClientInternalHelper;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.util.ChecksumType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+public class CompatUtil {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class);
+
+ private CompatUtil() {
+ // Not to be instantiated
+ }
+
+ public static HFileContext createHFileContext(Configuration conf, Algorithm compression,
+ Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) {
+
+ return new HFileContextBuilder().withCompression(compression)
+ .withChecksumType(StoreUtils.getChecksumType(conf))
+ .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize)
+ .withDataBlockEncoding(encoding).build();
+ }
+
+ public static List<RegionInfo> getMergeRegions(Connection conn, RegionInfo regionInfo)
+ throws IOException {
+ return CatalogFamilyFormat
+ .getMergeRegions(MetaTableAccessor.getRegionResult(conn, regionInfo).rawCells());
+ }
+
+ public static RegionInfo getRegionInfo(Result result) throws IOException {
+ return CatalogFamilyFormat.getRegionInfo(result);
+ }
+
+ public static ChecksumType getChecksumType(Configuration conf) {
+ return StoreUtils.getChecksumType(conf);
+ }
+
+ public static int getBytesPerChecksum(Configuration conf) {
+ return StoreUtils.getBytesPerChecksum(conf);
+ }
+
+ public static Connection createShortCircuitConnection(final Configuration configuration,
+ final RegionCoprocessorEnvironment env) throws IOException {
+ return env.createConnection(configuration);
+ }
+
+ public static Mutation toMutation(ClientProtos.MutationProto mProto) throws IOException {
+ return ProtobufUtil.toMutation(mProto);
+ }
+
+ public static ClientProtos.MutationProto toMutation(ClientProtos.MutationProto.MutationType type,
+ Mutation mutation) throws IOException {
+ return ProtobufUtil.toMutation(type, mutation);
+ }
+
+ public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
+ return ProtobufUtil.toScan(proto);
+ }
+
+ public static ClientProtos.Scan toScan(Scan scan) throws IOException {
+ return ProtobufUtil.toScan(scan);
+ }
+
+ public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
+ return ProtobufUtil.toProtoTableName(tableName);
+ }
+
+ public static TableName toTableName(HBaseProtos.TableName tableNamePB) {
+ return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(),
+ tableNamePB.getQualifier().asReadOnlyByteBuffer());
+ }
+
+ public static void setMvccReadPoint(Scan scan, long mvccReadPoint) {
+ ClientInternalHelper.setMvccReadPoint(scan, mvccReadPoint);
+ }
+
+ public static long getMvccReadPoint(Scan scan) {
+ return ClientInternalHelper.getMvccReadPoint(scan);
+ }
+
+ public static void closeAdminAndLog(Admin admin, Logger logger) {
+ admin.close();
+ }
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
new file mode 100644
index 00000000000..ccd416a9143
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+public class HbaseCompatCapabilities {
+ // Currently every supported HBase version has the same capabilities, so there is
+ // nothing in here.
+}
diff --git a/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java
new file mode 100644
index 00000000000..08e73164e3a
--- /dev/null
+++ b/phoenix-hbase-compat-3.0.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+
+/**
+ * Replication Sink compat endpoint that helps attach WAL attributes to mutation. In order to do so,
+ * this endpoint utilizes regionserver hook
+ * {@link #preReplicationSinkBatchMutate(ObserverContext, AdminProtos.WALEntry, Mutation)}
+ */
+public class ReplicationSinkCompatEndpoint
+ implements RegionServerCoprocessor, RegionServerObserver {
+
+ @Override
+ public Optional<RegionServerObserver> getRegionServerObserver() {
+ return Optional.of(this);
+ }
+
+ @Override
+ public void preReplicationSinkBatchMutate(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+ AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException {
+ RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation);
+ List<WALProtos.Attribute> attributeList = walEntry.getKey().getExtendedAttributesList();
+ attachWALExtendedAttributesToMutation(mutation, attributeList);
+ }
+
+ private void attachWALExtendedAttributesToMutation(Mutation mutation,
+ List<WALProtos.Attribute> attributeList) {
+ if (attributeList != null) {
+ for (WALProtos.Attribute attribute : attributeList) {
+ mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray());
+ }
+ }
+ }
+}
diff --git a/pom.xml b/pom.xml
index f7a553bd3e4..a921fa6d76c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -45,6 +45,7 @@
+ phoenix-hbase-compat-3.0.0
phoenix-hbase-compat-2.6.0
phoenix-hbase-compat-2.5.4
phoenix-hbase-compat-2.5.0
@@ -79,6 +80,11 @@
2.5.12-hadoop3
2.6.1-hadoop3
2.6.3-hadoop3
+ 3.0.0-beta-2-SNAPSHOT
+
+
+ ${hbase.version}
1.8
${compileSource}
@@ -144,6 +150,7 @@
2.10
3.5.0
+ 1.5.2
2.4.0
${antlr.version}
4.8.1.0
@@ -258,6 +265,11 @@
phoenix-client-embedded-hbase-2.5
${project.version}
| | | |
+
+ org.apache.phoenix
+ phoenix-client-embedded-hbase-2.6.0
+ ${project.version}
+
org.apache.phoenix
phoenix-client-embedded-hbase-2.6
@@ -265,7 +277,7 @@
org.apache.phoenix
- phoenix-client-embedded-hbase-2.6.0
+ phoenix-client-embedded-hbase-3.0
${project.version}
@@ -293,6 +305,11 @@
phoenix-client-lite-hbase-2.6
${project.version}
+
+ org.apache.phoenix
+ phoenix-client-lite-hbase-3.0
+ ${project.version}
+
org.apache.phoenix
phoenix-server-hbase-2.5.0
@@ -318,6 +335,11 @@
phoenix-server-hbase-2.6
${project.version}
+
+ org.apache.phoenix
+ phoenix-server-hbase-3.0
+ ${project.version}
+
org.apache.phoenix
phoenix-mapreduce-byo-shaded-hbase-hbase-2.5.0
@@ -343,6 +365,11 @@
phoenix-mapreduce-byo-shaded-hbase-hbase-2.6
${project.version}
+
+ org.apache.phoenix
+ phoenix-mapreduce-byo-shaded-hbase-hbase-3.0
+ ${project.version}
+
org.apache.phoenix
phoenix-pherf
@@ -373,6 +400,11 @@
phoenix-hbase-compat-2.6.0
${project.version}
+
+ org.apache.phoenix
+ phoenix-hbase-compat-3.0.0
+ ${project.version}
+
org.apache.phoenix
@@ -762,6 +794,11 @@
hbase-annotations
${hbase.version}
+
+ org.apache.hbase
+ hbase-balancer
+ ${hbase.version}
+
org.apache.hbase
hbase-protocol
@@ -1523,34 +1560,11 @@
org.codehaus.mojo
build-helper-maven-plugin
${maven-build-helper-plugin.version}
-
-
- add-test-source
-
- add-test-source
-
- validate
-
-
- ${basedir}/src/it/java
-
-
-
-
- add-test-resource
-
- add-test-resource
-
- validate
-
-
-
- ${basedir}/src/it/resources
-
-
-
-
-
+
+
+ com.google.code.maven-replacer-plugin
+ replacer
+ ${maven-replacer-plugin.version}
org.apache.maven.plugins
@@ -1563,7 +1577,7 @@
${test.output.tofile}
180
exit
- ${basedir}/src/it/java
+
false
false
@@ -2209,6 +2223,22 @@
${hbase-2.6.0.runtime.version}
+
+
+ phoenix-hbase-compat-3.0.0
+
+
+ hbase.profile
+ 3.0
+
+
+
+ 3.0
+ 3.0.0
+ 3.4.2
+ ${hbase-3.0.runtime.version}
+
+
owasp-dependency-check
@@ -2378,6 +2408,10 @@
linux
aarch64
+
+ hbase.profile
+ !3.0
+
@@ -2393,6 +2427,10 @@
mac
aarch64
+
+ hbase.profile
+ !3.0
+
osx-x86_64