Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
c50d221
Updated CatalogTableObject to accommodate new Spark versions
Feb 20, 2019
dd7c92f
update gradle to 5.2.1
Feb 20, 2019
c7194b6
more changes to thrift for Catalog object support in newer Spark rele…
Feb 20, 2019
a48542c
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
May 5, 2019
b9f7ec0
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
Jun 5, 2019
ea0fed5
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
Jun 22, 2019
9b0dff1
update to gradle 5.4.1
Jun 25, 2019
bb343bd
more build changes
Jun 25, 2019
9e7ba87
add SparkSupport to entry point
Jun 25, 2019
a9f2930
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
Jul 28, 2019
2cd1ba1
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
Aug 22, 2019
0a98944
Merge remote-tracking branch 'origin/snappy/master' into spark-multiv…
Sep 20, 2019
07e1f61
fix build
Sep 22, 2019
90c7457
minor changes
Sep 29, 2019
34397f2
revert spark-unsafe dependency changes
Oct 13, 2019
e8cb32e
update gradle to 5.6.4
Dec 26, 2019
9017d3c
support for ALTER TABLE SCHEMA
Jan 30, 2020
3a3eb69
rename var for consistency
Feb 3, 2020
b05ed2d
updated dependencies
Feb 12, 2020
28c550a
update spark version
Feb 18, 2020
3a356cd
avoid logging when no existing disk region was destroyed during create
Feb 21, 2020
622775a
allow exact match against SNAPPYSYS_INTERNAL tables in getColumns/get…
Feb 26, 2020
67426d2
change spark-unsafe to be compileOnly
Mar 2, 2020
bce3c9d
correct spark-unsafe deps to include in runtime (for snappy-core and …
Mar 6, 2020
bdffb42
Revert "correct spark-unsafe deps to include in runtime (for snappy-c…
Mar 6, 2020
bdd9822
removing spark-unsafe dependency
Mar 15, 2020
fbacaa8
use BigDecimal.valueOf instead of new BigDecimal
Mar 20, 2020
e5ecb5d
check that the new SparkOutOfMemoryError is not a JVM failure
Mar 27, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 10 additions & 10 deletions build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ buildscript {
}
dependencies {
classpath 'gradle.plugin.ca.coglinc2:javacc-gradle-plugin:3.0.0'
classpath 'com.github.jengelman.gradle.plugins:shadow:4.0.3'
classpath 'de.undercouch:gradle-download-task:3.4.3'
classpath 'com.github.jengelman.gradle.plugins:shadow:5.2.0'
classpath 'de.undercouch:gradle-download-task:4.0.4'
}
}

Expand All @@ -42,11 +42,11 @@ allprojects {

ext {
scalaBinaryVersion = '2.11'
sparkVersion = '2.1.1'
sparkVersion = '2.4.5'
springVersion = '3.2.18.RELEASE'
springShellVersion = '1.0.0.RELEASE'
log4jVersion = '1.2.17'
slf4jVersion = '1.7.25'
slf4jVersion = '1.7.30'
junitVersion = '4.12'
hamcrestVersion = '1.3'
jmockVersion = '2.9.0'
Expand All @@ -58,19 +58,19 @@ allprojects {
antlr2Version = '2.7.7'
pxfVersion = '2.5.1.0'
osgiVersion = '6.0.0'
jettyVersion = '9.2.26.v20180806'
jettyVersion = '9.3.28.v20191105'
hadoopVersion = '2.7.7'
protobufVersion = '2.6.1'
kryoVersion = '4.0.2'
thriftVersion = '0.9.3'
jnaVersion = '4.5.2'
jerseyVersion = '2.22.2'
jsr305Version = '3.0.2'
servletAPIVersion = '3.1.0'
javaxServletVersion = '3.1.0'
derbyVersion = '10.14.2.0'
hibernateVersion = '4.3.11.Final'
hibernateJpaVersion = '1.0.2.Final'
commonsBeanutilsVersion = '1.9.3'
commonsBeanutilsVersion = '1.9.4'
commonsCliVersion = '1.4'
commonsCodecVersion = '1.11'
commonsConfigVersion = '1.10'
Expand All @@ -94,12 +94,12 @@ allprojects {
hadoopJettyVersion = '6.1.26'
sunJerseyVersion = '1.19.4'
guavaVersion = '14.0.1'
nettyAllVersion = '4.0.56.Final'
nettyAllVersion = '4.1.45.Final'
jlineVersion = '2.14.6'
jlineSfVersion = '1.0.S2-B'
jackson1Version = '1.9.13'
eclipseCollectionsVersion = '9.2.0'
snappyJavaVersion = '1.1.7.2'
eclipseCollectionsVersion = '10.1.0'
snappyJavaVersion = '1.1.7.3'
rsApiVersion = '2.1.1'
htraceVersion = '3.2.0-incubating'
clouderaHtraceVersion = '2.05'
Expand Down
2 changes: 1 addition & 1 deletion filehdr-mod.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/*
* Changes for TIBCO ComputeDB data platform.
*
* Portions Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
* Portions Copyright (c) 2017-2020 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
Expand Down
2 changes: 1 addition & 1 deletion filehdr.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
* Copyright (c) 2017-2020 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,8 @@ public static void setSkipOOMEForThread(boolean skip) {
public static boolean isJVMFailureError(Error err) {
// all VirtualMachineErrors are not fatal to the JVM, in particular
// StackOverflowError is not
if (err instanceof OutOfMemoryError) {
if (err instanceof OutOfMemoryError &&
!err.getClass().getName().contains("SparkOutOfMemoryError")) {
if (SKIP_OOME.get() == Boolean.TRUE) return false;
// ignore OOMEs thrown by Spark
String message = err.getMessage();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,8 @@

public final class JGroupMembershipManager implements MembershipManager {

public static final String DEFAULT_LEADER_MEMBER_WEIGHT_NAME = "gemfire.member-weight";

/** product version to use for multicast serialization */
volatile boolean disableMulticastForRollingUpgrade;

Expand Down Expand Up @@ -1506,7 +1508,7 @@ private JChannel createChannel(LogWriterI18n theLogger,
properties = replaceStrings(properties, "PARTITION_THRESHOLD",
String.valueOf(threshold));

int weight = Integer.getInteger("gemfire.member-weight", 0);
int weight = Integer.getInteger(DEFAULT_LEADER_MEMBER_WEIGHT_NAME, 0);
properties = replaceStrings(properties, "MEMBER_WEIGHT", String.valueOf(weight));

if (theLogger.fineEnabled()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9596,11 +9596,14 @@ protected DiskRegion createDiskRegion(InternalRegionArguments internalRegionArgs
final GemFireCacheImpl.StaticSystemCallbacks sysCb =
GemFireCacheImpl.FactoryStatics.systemCallbacks;
if (sysCb != null && sysCb.destroyExistingRegionInCreate(dsi, this)) {
LogWriter logger = getCache().getLogger();
if (logger.infoEnabled()) {
logger.info("Destroying existing region: " + getFullPath() + " in create");
try {
dsi.destroyRegion(getFullPath(), true);
LogWriter logger = getCache().getLogger();
if (logger.infoEnabled()) {
logger.info("Destroyed existing disk region: " + this + " in create");
}
} catch (IllegalArgumentException ignored) {
}
dsi.destroyRegion(getFullPath(), false);
}

DiskRegionStats stats;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10822,11 +10822,14 @@ protected DiskRegion createDiskRegion(InternalRegionArguments internalRegionArgs
final GemFireCacheImpl.StaticSystemCallbacks sysCb =
GemFireCacheImpl.FactoryStatics.systemCallbacks;
if (sysCb != null && sysCb.destroyExistingRegionInCreate(dsi, this)) {
LogWriter logger = getCache().getLogger();
if (logger.infoEnabled()) {
logger.info("Destroying existing region: " + this + " in create");
try {
dsi.destroyRegion(getFullPath(), true);
LogWriter logger = getCache().getLogger();
if (logger.infoEnabled()) {
logger.info("Destroyed existing disk region: " + this + " in create");
}
} catch (IllegalArgumentException ignored) {
}
dsi.destroyRegion(getFullPath(), false);
}
}
return null;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
package com.gemstone.gemfire.internal.offheap;

import com.gemstone.gemfire.internal.SharedLibrary;
import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder;
import com.gemstone.gemfire.pdx.internal.unsafe.UnsafeWrapper;
import org.apache.spark.unsafe.Platform;

public final class UnsafeMemoryChunk implements MemoryChunk {
private static final UnsafeWrapper unsafe;
Expand Down Expand Up @@ -99,7 +99,8 @@ public static void readUnsafeBytes(final long addr, final byte[] bytes,
assert SimpleMemoryAllocatorImpl.validateAddressAndSizeWithinSlab(addr,
length);

Platform.copyMemory(null, addr, bytes, Platform.BYTE_ARRAY_OFFSET, length);
UnsafeHolder.copyMemory(null, addr, bytes,
UnsafeHolder.BYTE_ARRAY_OFFSET, length);
}

/**
Expand All @@ -113,8 +114,8 @@ public static void readUnsafeBytes(long addr, int offset, final byte[] bytes,
assert SimpleMemoryAllocatorImpl.validateAddressAndSizeWithinSlab(addr,
length + offset);

Platform.copyMemory(null, addr + offset, bytes,
Platform.BYTE_ARRAY_OFFSET + bytesOffset, length);
UnsafeHolder.copyMemory(null, addr + offset, bytes,
UnsafeHolder.BYTE_ARRAY_OFFSET + bytesOffset, length);
}

public static byte readAbsoluteByte(long addr) {
Expand Down Expand Up @@ -223,8 +224,8 @@ public static void readAbsoluteBytes(long addr, int addrOffset, byte[] bytes,
assert SimpleMemoryAllocatorImpl.validateAddressAndSizeWithinSlab(addr,
size + addrOffset);

Platform.copyMemory(null, addr + addrOffset, bytes,
Platform.BYTE_ARRAY_OFFSET + bytesOffset, size);
UnsafeHolder.copyMemory(null, addr + addrOffset, bytes,
UnsafeHolder.BYTE_ARRAY_OFFSET + bytesOffset, size);
}

@Override
Expand All @@ -246,7 +247,7 @@ public static void writeAbsoluteBytes(long addr, byte[] bytes, int bytesOffset,
}
assert SimpleMemoryAllocatorImpl.validateAddressAndSizeWithinSlab(addr, size);

Platform.copyMemory(bytes, Platform.BYTE_ARRAY_OFFSET + bytesOffset,
UnsafeHolder.copyMemory(bytes, UnsafeHolder.BYTE_ARRAY_OFFSET + bytesOffset,
null, addr, size);
}

Expand Down
1 change: 0 additions & 1 deletion gemfire-shared/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,4 @@
dependencies {
compileOnly project(subprojectBase + 'gemfire-trove')
compile "net.java.dev.jna:jna:${jnaVersion}"
compile "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}"
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,20 @@
import com.gemstone.gemfire.internal.shared.unsafe.DirectBufferAllocator;
import com.gemstone.gemfire.internal.shared.unsafe.FreeMemory;
import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryAllocator;

/**
* Allocate, release and expand ByteBuffers (in-place if possible).
*/
public abstract class BufferAllocator implements Closeable {

public static final boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
System.getProperty("spark.memory.debugFill", "false"));

// Same as jemalloc's debug fill values.
public static final byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
// @SuppressWarnings("WeakerAccess")
public static final byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;

public static final String STORE_DATA_FRAME_OUTPUT =
"STORE_DATA_FRAME_OUTPUT";

Expand Down Expand Up @@ -66,8 +72,8 @@ public ByteBuffer allocateWithFallback(int size, String owner) {
* Fill the given portion of the buffer setting it with given byte.
*/
public final void fill(ByteBuffer buffer, byte b, int position, int numBytes) {
Platform.setMemory(baseObject(buffer), baseOffset(buffer) + position,
numBytes, b);
UnsafeHolder.getUnsafe().setMemory(baseObject(buffer),
baseOffset(buffer) + position, numBytes, b);
}

/**
Expand Down Expand Up @@ -142,18 +148,18 @@ public final void release(ByteBuffer buffer) {
*/
public static boolean releaseBuffer(ByteBuffer buffer) {
final boolean hasArray = buffer.hasArray();
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
if (MEMORY_DEBUG_FILL_ENABLED) {
Object baseObject;
long baseOffset;
if (hasArray) {
baseObject = buffer.array();
baseOffset = Platform.BYTE_ARRAY_OFFSET + buffer.arrayOffset();
baseOffset = UnsafeHolder.BYTE_ARRAY_OFFSET + buffer.arrayOffset();
} else {
baseObject = null;
baseOffset = UnsafeHolder.getDirectBufferAddress(buffer);
}
Platform.setMemory(baseObject, baseOffset, buffer.capacity(),
MemoryAllocator.MEMORY_DEBUG_FILL_FREED_VALUE);
UnsafeHolder.getUnsafe().setMemory(baseObject, baseOffset, buffer.capacity(),
MEMORY_DEBUG_FILL_FREED_VALUE);
}
// Actual release should depend on buffer type and not allocator type.
// Reserved off-heap space will be decremented by FreeMemory implementation.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,11 @@
import javax.naming.directory.InitialDirContext;

import com.gemstone.gemfire.internal.shared.unsafe.DirectBufferAllocator;
import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder;
import org.apache.log4j.Appender;
import org.apache.log4j.FileAppender;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.apache.spark.unsafe.Platform;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -1948,16 +1948,17 @@ public static boolean equalBuffers(final byte[] bytes,
return false;
}
// read in longs to minimize ByteBuffer get() calls
final sun.misc.Unsafe unsafe = UnsafeHolder.getUnsafe();
int pos = buffer.position();
final int endPos = (pos + len);
final boolean sameOrder = ByteOrder.nativeOrder() == buffer.order();
// round off to nearest factor of 8 to read in longs
final int endRound8Pos = (len % 8) != 0 ? (endPos - 8) : endPos;
long indexPos = Platform.BYTE_ARRAY_OFFSET;
long indexPos = UnsafeHolder.BYTE_ARRAY_OFFSET;
while (pos < endRound8Pos) {
// splitting into longs is faster than reading one byte at a time even
// though it costs more operations (about 20% in micro-benchmarks)
final long s = Platform.getLong(bytes, indexPos);
final long s = unsafe.getLong(bytes, indexPos);
final long v = buffer.getLong(pos);
if (sameOrder) {
if (s != v) {
Expand All @@ -1970,7 +1971,7 @@ public static boolean equalBuffers(final byte[] bytes,
indexPos += 8;
}
while (pos < endPos) {
if (Platform.getByte(bytes, indexPos) != buffer.get(pos)) {
if (unsafe.getByte(bytes, indexPos) != buffer.get(pos)) {
return false;
}
pos++;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,7 @@

import java.nio.ByteBuffer;

import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryAllocator;
import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder;

/**
* Heap ByteBuffer implementation of {@link BufferAllocator}.
Expand All @@ -44,16 +43,16 @@ public ByteBuffer allocate(int size, String owner) {
@Override
public ByteBuffer allocateForStorage(int size) {
ByteBuffer buffer = ByteBuffer.allocate(size);
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
fill(buffer, MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
if (BufferAllocator.MEMORY_DEBUG_FILL_ENABLED) {
fill(buffer, BufferAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
}
return buffer;
}

@Override
public void clearPostAllocate(ByteBuffer buffer, int position) {
// JVM clears the allocated area, so only clear for DEBUG_FILL case
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
if (BufferAllocator.MEMORY_DEBUG_FILL_ENABLED) {
// clear till the capacity and not limit since former will be a factor
// of 8 and hence more efficient in Unsafe.setMemory
fill(buffer, (byte)0, position, buffer.capacity() - position);
Expand All @@ -67,7 +66,7 @@ public Object baseObject(ByteBuffer buffer) {

@Override
public long baseOffset(ByteBuffer buffer) {
return Platform.BYTE_ARRAY_OFFSET + buffer.arrayOffset();
return UnsafeHolder.BYTE_ARRAY_OFFSET + buffer.arrayOffset();
}

@Override
Expand All @@ -80,11 +79,11 @@ public ByteBuffer expand(ByteBuffer buffer, int required, String owner) {
final int newLength = BufferAllocator.expandedSize(currentUsed, required);
final byte[] newBytes = new byte[newLength];
System.arraycopy(bytes, buffer.arrayOffset(), newBytes, 0, currentUsed);
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
if (BufferAllocator.MEMORY_DEBUG_FILL_ENABLED) {
// fill the remaining bytes
ByteBuffer buf = ByteBuffer.wrap(newBytes, currentUsed,
newLength - currentUsed);
fill(buf, MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
fill(buf, BufferAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
}
return ByteBuffer.wrap(newBytes).order(buffer.order());
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,8 @@ public static boolean isUsingGemFireXDEntryPoint() {
"io.snappydata.gemxd.SnappyDataVersion".equals(frameCls) ||
"org.apache.spark.sql.SnappySession$".equals(frameCls) ||
"org.apache.spark.sql.SnappySession".equals(frameCls) ||
"org.apache.spark.sql.SparkSupport$".equals(frameCls) ||
"org.apache.spark.sql.SparkSupport".equals(frameCls) ||
"io.snappydata.gemxd.ClusterCallbacksImpl$".equals(frameCls)) {
return true;
}
Expand Down
Loading