Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
import com.apple.foundationdb.linear.Quantizer;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;
import com.google.common.base.Verify;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -98,6 +99,34 @@ public NodeFactory<N> getNodeFactory() {
return nodeFactory;
}

/**
 * Determines whether this storage adapter stores inlining nodes.
 * <p>
 * When the node factory reports {@link NodeKind#INLINING}, this adapter is verified to
 * actually be an {@link InliningStorageAdapter} before {@code true} is returned.
 *
 * @return {@code true} iff the node factory of this adapter produces inlining nodes
 */
@Override
public boolean isInliningStorageAdapter() {
    if (getNodeFactory().getNodeKind() != NodeKind.INLINING) {
        return false;
    }
    Verify.verify(this instanceof InliningStorageAdapter);
    return true;
}

/**
 * Narrows this storage adapter to an {@link InliningStorageAdapter}.
 * <p>
 * Fails with a verification error if this adapter is not an inlining storage adapter.
 *
 * @return this adapter cast to {@link InliningStorageAdapter}
 */
@Nonnull
@Override
public InliningStorageAdapter asInliningStorageAdapter() {
    final boolean inlining = isInliningStorageAdapter();
    Verify.verify(inlining);
    return (InliningStorageAdapter)this;
}

/**
 * Determines whether this storage adapter stores compact nodes.
 * <p>
 * When the node factory reports {@link NodeKind#COMPACT}, this adapter is verified to
 * actually be a {@link CompactStorageAdapter} before {@code true} is returned.
 *
 * @return {@code true} iff the node factory of this adapter produces compact nodes
 */
@Override
public boolean isCompactStorageAdapter() {
    if (getNodeFactory().getNodeKind() != NodeKind.COMPACT) {
        return false;
    }
    Verify.verify(this instanceof CompactStorageAdapter);
    return true;
}

/**
 * Narrows this storage adapter to a {@link CompactStorageAdapter}.
 * <p>
 * Fails with a verification error if this adapter is not a compact storage adapter.
 *
 * @return this adapter cast to {@link CompactStorageAdapter}
 */
@Nonnull
@Override
public CompactStorageAdapter asCompactStorageAdapter() {
    final boolean compact = isCompactStorageAdapter();
    Verify.verify(compact);
    return (CompactStorageAdapter)this;
}

@Override
@Nonnull
public Subspace getSubspace() {
Expand Down Expand Up @@ -130,23 +159,6 @@ public OnReadListener getOnReadListener() {
return onReadListener;
}

/**
* Asynchronously fetches a node from a specific layer of the HNSW.
* <p>
* The node is identified by its {@code layer} and {@code primaryKey}. The entire fetch operation is
* performed within the given {@link ReadTransaction}. After the underlying
* fetch operation completes, the retrieved node is validated by the
* {@link #checkNode(Node)} method before the returned future is completed.
*
* @param readTransaction the non-null transaction to use for the read operation
* @param storageTransform an affine vector transformation operator that is used to transform the fetched vector
* into the storage space that is currently being used
* @param layer the layer of the tree from which to fetch the node
* @param primaryKey the non-null primary key that identifies the node to fetch
*
* @return a {@link CompletableFuture} that will complete with the fetched {@link AbstractNode}
* once it has been read from storage and validated
*/
@Nonnull
@Override
public CompletableFuture<AbstractNode<N>> fetchNode(@Nonnull final ReadTransaction readTransaction,
Expand All @@ -169,7 +181,7 @@ public CompletableFuture<AbstractNode<N>> fetchNode(@Nonnull final ReadTransacti
* @param primaryKey the primary key that uniquely identifies the node to be fetched; must not be {@code null}
*
* @return a {@link CompletableFuture} that will be completed with the fetched {@link AbstractNode}.
* The future will complete with {@code null} if no node is found for the given key and layer.
*/
@Nonnull
protected abstract CompletableFuture<AbstractNode<N>> fetchNodeInternal(@Nonnull ReadTransaction readTransaction,
Expand All @@ -185,7 +197,7 @@ protected abstract CompletableFuture<AbstractNode<N>> fetchNodeInternal(@Nonnull
* @return the node that was passed in
*/
@Nullable
private <T extends Node<N>> T checkNode(@Nullable final T node) {
protected <T extends AbstractNode<N>> T checkNode(@Nullable final T node) {
return node;
}

Expand All @@ -200,23 +212,23 @@ private <T extends Node<N>> T checkNode(@Nullable final T node) {
*
* @param transaction the non-null {@link Transaction} context for this write operation
* @param quantizer the quantizer to use
* @param node the non-null {@link Node} to be written to storage
* @param layer the layer index where the node is being written
* @param node the non-null {@link Node} to be written to storage
* @param changeSet the non-null {@link NeighborsChangeSet} detailing the modifications
* to the node's neighbors
*/
@Override
public void writeNode(@Nonnull final Transaction transaction, @Nonnull final Quantizer quantizer,
@Nonnull final AbstractNode<N> node, final int layer,
final int layer, @Nonnull final AbstractNode<N> node,
@Nonnull final NeighborsChangeSet<N> changeSet) {
writeNodeInternal(transaction, quantizer, node, layer, changeSet);
writeNodeInternal(transaction, quantizer, layer, node, changeSet);
if (logger.isTraceEnabled()) {
logger.trace("written node with key={} at layer={}", node.getPrimaryKey(), layer);
}
}

/**
 * Writes a single node to the given layer of the data store as part of a larger transaction.
 * <p>
 * This is an abstract method that concrete implementations must provide. It is responsible
 * for the low-level persistence of the given {@code node} at the given {@code layer}.
 *
 * @param transaction the non-null transaction context for the write operation
 * @param quantizer the quantizer to use
 * @param layer the layer or level of the node in the structure
 * @param node the non-null {@link Node} to write
 * @param changeSet the non-null {@link NeighborsChangeSet} detailing additions or
 *        removals of neighbor links
 */
protected abstract void writeNodeInternal(@Nonnull Transaction transaction, @Nonnull Quantizer quantizer,
                                          int layer, @Nonnull AbstractNode<N> node,
                                          @Nonnull NeighborsChangeSet<N> changeSet);

/**
 * Deletes a single node from the given layer and traces the deletion.
 * <p>
 * The actual removal is delegated to {@code deleteNodeInternal}; afterwards the deletion
 * is logged at trace level with the node's primary key and layer.
 *
 * @param transaction the non-null transaction to use for the delete operation
 * @param layer the layer from which the node is deleted
 * @param primaryKey the non-null primary key identifying the node to delete
 */
@Override
public void deleteNode(@Nonnull final Transaction transaction, final int layer, @Nonnull final Tuple primaryKey) {
    deleteNodeInternal(transaction, layer, primaryKey);
    if (logger.isTraceEnabled()) {
        logger.trace("deleted node with key={} at layer={}", primaryKey, layer);
    }
}

/**
 * Deletes a single node from the given layer of the data store as part of a larger transaction.
 * <p>
 * This is an abstract method that concrete implementations must provide; it performs the
 * low-level removal without any logging.
 *
 * @param transaction the transaction to use
 * @param layer the layer from which the node is deleted
 * @param primaryKey the primary key of the node to delete
 */
protected abstract void deleteNodeInternal(@Nonnull Transaction transaction, int layer, @Nonnull Tuple primaryKey);
}
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,15 @@ public BaseNeighborsChangeSet<N> getParent() {
return null;
}

/**
 * Indicates whether this change set carries any neighbor modifications.
 *
 * @return always {@code false}; a base change set represents the unmodified state and
 *         therefore never holds changes
 */
@Override
public boolean hasChanges() {
    return false;
}

/**
* Retrieves the list of neighbors associated with this object.
* <p>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,12 @@ public CompactStorageAdapter(@Nonnull final Config config,
super(config, nodeFactory, subspace, onWriteListener, onReadListener);
}

/**
 * Returns the vector stored on the given node.
 * <p>
 * The node is narrowed to a compact node and its vector is returned directly; the
 * {@code nodeReference} argument is not consulted here.
 *
 * @param nodeReference the reference to the node (unused by this implementation)
 * @param node the node whose vector is returned; must be a compact node
 *
 * @return the transformed vector held by the given node
 */
@Nonnull
@Override
public Transformed<RealVector> getVector(@Nonnull final NodeReference nodeReference, @Nonnull final AbstractNode<NodeReference> node) {
    return node.asCompactNode().getVector();
}

/**
* Asynchronously fetches a node from the database for a given layer and primary key.
* <p>
Expand All @@ -88,17 +94,14 @@ public CompactStorageAdapter(@Nonnull final Config config,
*
* @return a future that will complete with the fetched {@link AbstractNode} or {@code null} if the node cannot
* be fetched
*
* @throws IllegalStateException if the node cannot be found in the database for the given key
*/
@Nonnull
@Override
protected CompletableFuture<AbstractNode<NodeReference>> fetchNodeInternal(@Nonnull final ReadTransaction readTransaction,
@Nonnull final AffineOperator storageTransform,
final int layer,
@Nonnull final Tuple primaryKey) {
final byte[] keyBytes = getDataSubspace().pack(Tuple.from(layer, primaryKey));

final byte[] keyBytes = getNodeKey(layer, primaryKey);
return readTransaction.get(keyBytes)
.thenApply(valueBytes -> {
if (valueBytes == null) {
Expand Down Expand Up @@ -216,16 +219,16 @@ private AbstractNode<NodeReference> compactNodeFromTuples(@Nonnull final AffineO
*
* @param transaction the {@link Transaction} to use for the write operation.
* @param quantizer the quantizer to use
* @param node the {@link AbstractNode} to be serialized and written; it is processed as a {@link CompactNode}.
* @param layer the graph layer index for the node, used to construct the storage key.
* @param node the {@link AbstractNode} to be serialized and written; it is processed as a {@link CompactNode}.
* @param neighborsChangeSet a {@link NeighborsChangeSet} containing the additions and removals, which are
* merged to determine the final set of neighbors to be written.
*/
@Override
public void writeNodeInternal(@Nonnull final Transaction transaction, @Nonnull final Quantizer quantizer,
@Nonnull final AbstractNode<NodeReference> node, final int layer,
final int layer, @Nonnull final AbstractNode<NodeReference> node,
@Nonnull final NeighborsChangeSet<NodeReference> neighborsChangeSet) {
final byte[] key = getDataSubspace().pack(Tuple.from(layer, node.getPrimaryKey()));
final byte[] key = getNodeKey(layer, node.getPrimaryKey());

final List<Object> nodeItems = Lists.newArrayListWithExpectedSize(3);
nodeItems.add(NodeKind.COMPACT.getSerialized());
Expand Down Expand Up @@ -254,6 +257,33 @@ public void writeNodeInternal(@Nonnull final Transaction transaction, @Nonnull f
}
}

/**
 * Deletes a compact node by clearing its single key/value pair in the database and
 * notifying the write listener of both the node deletion and the key deletion.
 *
 * @param transaction the transaction to use
 * @param layer the layer the node is deleted from
 * @param primaryKey the primary key of the node to delete
 */
@Override
protected void deleteNodeInternal(@Nonnull final Transaction transaction, final int layer,
                                  @Nonnull final Tuple primaryKey) {
    final byte[] key = getNodeKey(layer, primaryKey);
    transaction.clear(key);
    getOnWriteListener().onNodeDeleted(layer, primaryKey);
    getOnWriteListener().onKeyDeleted(layer, key);
}

/**
 * Computes the raw database key under which a node is stored.
 * <p>
 * The key is produced by packing the tuple {@code (layer, primaryKey)} within the data
 * subspace; tuple packing preserves the sort order of the components, so node keys sort
 * by layer first and primary key second.
 *
 * @param layer the layer index where the node resides
 * @param primaryKey the primary key that uniquely identifies the node within its layer,
 *        encapsulated in a {@link Tuple}
 *
 * @return a byte array representing the packed key for the specified node
 */
@Nonnull
private byte[] getNodeKey(final int layer, @Nonnull final Tuple primaryKey) {
    final Tuple layerAndPrimaryKey = Tuple.from(layer, primaryKey);
    return getDataSubspace().pack(layerAndPrimaryKey);
}

/**
* Scans a given layer for nodes, returning an iterable over the results.
* <p>
Expand Down
Loading
Loading