Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,30 @@
*/
package org.apache.phoenix.jdbc;

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.phoenix.exception.FailoverSQLException;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.log.LogLevel;
import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.schema.PMetaData;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.UncheckedIOException;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
Expand All @@ -41,7 +55,9 @@
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.text.Format;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
Expand Down Expand Up @@ -372,6 +388,37 @@ public PreparedStatement prepareStatement(String sql) throws SQLException {
return wrapActionDuringFailover(() -> connection.prepareStatement(sql));
}

@Override
public String getDatePattern() throws SQLException {
    // Read the delegate field inside the action (not via a method reference) so a
    // retry after failover — which presumably swaps `connection` — sees the new one.
    return wrapActionDuringFailover(() -> {
        return connection.getDatePattern();
    });
}

@Override
public PTable getTable(@Nullable String tenantId, String fullTableName, @Nullable Long timestamp) throws SQLException {
    // Resolve the table through the currently wrapped connection, under failover protection.
    return wrapActionDuringFailover(() -> {
        return connection.getTable(tenantId, fullTableName, timestamp);
    });
}

@Override
public boolean isRunningUpgrade() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.isRunningUpgrade();
    });
}

@Override
public String getURL() throws SQLException {
    // Ask the wrapped connection for its JDBC URL, retrying through the failover wrapper.
    return wrapActionDuringFailover(() -> {
        return connection.getURL();
    });
}

@Override
public LogLevel getLogLevel() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getLogLevel();
    });
}

@Override
public KeyValueBuilder getKeyValueBuilder() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getKeyValueBuilder();
    });
}


@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return wrapActionDuringFailover(() -> connection.prepareCall(sql));
Expand Down Expand Up @@ -623,6 +670,88 @@ public int getNetworkTimeout() throws SQLException {
return wrapActionDuringFailover(() -> connection.getNetworkTimeout());
}

@Override
public ConnectionQueryServices getQueryServices() throws SQLException {
    // Field is dereferenced inside the action so a failover retry uses the fresh delegate.
    return wrapActionDuringFailover(() -> {
        return connection.getQueryServices();
    });
}

@Override
public PTable getTable(PTableKey key) throws SQLException {
    // Look up the table by key on the wrapped connection, under failover protection.
    return wrapActionDuringFailover(() -> {
        return connection.getTable(key);
    });
}

@Override
public PTable getTable(String name) throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getTable(name);
    });
}

@Override
public PTable getTableNoCache(String name) throws SQLException {
    // Cache-bypassing lookup, delegated through the failover wrapper.
    return wrapActionDuringFailover(() -> {
        return connection.getTableNoCache(name);
    });
}

@Override
public Consistency getConsistency() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getConsistency();
    });
}

@Override
@Nullable
public PName getTenantId() throws SQLException {
    // May be null for global (non-tenant) connections; delegated under failover protection.
    return wrapActionDuringFailover(() -> {
        return connection.getTenantId();
    });
}

@Override
public MutationState getMutationState() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getMutationState();
    });
}

@Override
public PMetaData getMetaDataCache() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getMetaDataCache();
    });
}

@Override
public int getMutateBatchSize() throws SQLException {
    // Failover-safe delegation to the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getMutateBatchSize();
    });
}

@Override
public int executeStatements(Reader reader, List<Object> binds, PrintStream out)
        throws IOException, SQLException {
    // Execute the statements on the wrapped connection under failover protection.
    // The action's functional interface cannot declare IOException, so it is
    // tunneled out as an UncheckedIOException and restored to the declared
    // checked type below. (The previous code rethrew it as a bare
    // RuntimeException, which escaped to callers and violated the
    // "throws IOException" contract of this method.)
    try {
        return wrapActionDuringFailover(() -> {
            try {
                return connection.executeStatements(reader, binds, out);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });
    } catch (UncheckedIOException e) {
        throw e.getCause();
    }
}

@Override
public Format getFormatter(PDataType type) throws SQLException {
    // Obtain the formatter for the given data type from the wrapped connection.
    return wrapActionDuringFailover(() -> {
        return connection.getFormatter(type);
    });
}

@Override
public void setRunningUpgrade(boolean isRunningUpgrade) throws SQLException {
    // Void delegation; no result to return from the failover-protected action.
    wrapActionDuringFailover(() -> {
        connection.setRunningUpgrade(isRunningUpgrade);
    });
}

@Override
public PTable getTable(String tenantId, String fullTableName) throws SQLException {
    // Tenant-scoped table lookup, delegated under failover protection.
    return wrapActionDuringFailover(() -> {
        return connection.getTable(tenantId, fullTableName);
    });
}

@Override
public PTable getTableNoCache(PName tenantId, String name) throws SQLException {
    // Cache-bypassing, tenant-scoped lookup through the failover wrapper.
    return wrapActionDuringFailover(() -> {
        return connection.getTableNoCache(tenantId, name);
    });
}

@Override
public void setIsClosing(boolean imitateIsClosing) throws SQLException {
    // Void delegation; no result to return from the failover-protected action.
    wrapActionDuringFailover(() -> {
        connection.setIsClosing(imitateIsClosing);
    });
}

/**
* @return the currently wrapped connection.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ public class HighAvailabilityGroup {
// Configuration key for the HA transition timeout, in milliseconds.
public static final String PHOENIX_HA_TRANSITION_TIMEOUT_MS_KEY =
PHOENIX_HA_ATTR_PREFIX + "transition.timeout.ms";
// Default transition timeout used when the key above is unset.
public static final long PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT = 5 * 60 * 1000; // 5 mins

// Property naming the active HA profile.
// NOTE(review): semantics inferred from the key string only — confirm against callers.
public static final String HA_GROUP_PROFILE = "phoenix.ha.profile.active";
static final Logger LOG = LoggerFactory.getLogger(HighAvailabilityGroup.class);

/**
Expand Down Expand Up @@ -731,7 +731,7 @@ HAGroupInfo getGroupInfo() {
return info;
}

Properties getProperties() {
public Properties getProperties() {
return properties;
}

Expand All @@ -748,7 +748,7 @@ public ClusterRoleRecord getRoleRecord() {
* The lifecycle management is confined to this class because an HA group is a shared resource.
* Someone calling close on this would make it unusable, since the state would become closed.
*/
void close() {
public void close() {
roleManagerExecutor.shutdownNow();
try {
// TODO: Parameterize and set in future work item for pluggable
Expand Down
Loading