From 40df85413af18494ee14b67db6186f13c8966d3b Mon Sep 17 00:00:00 2001 From: Nick Kreeger Date: Tue, 26 Mar 2019 19:30:14 -0700 Subject: [PATCH 1/5] save --- .vscode/c_cpp_properties.json | 4 +-- binding/napi_auto_ref.h | 67 +++++++++++++++++++++++++++++++++++ binding/tfjs_backend.cc | 38 +++++++++++++++++--- tensor_alloc_test.js | 14 ++++++++ 4 files changed, 117 insertions(+), 6 deletions(-) create mode 100644 binding/napi_auto_ref.h create mode 100644 tensor_alloc_test.js diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json index 599f70e5..a3816b14 100644 --- a/.vscode/c_cpp_properties.json +++ b/.vscode/c_cpp_properties.json @@ -7,7 +7,7 @@ "${workspaceFolder}/deps/include", "${workspaceFolder}/deps/tensorflow/include/tensorflow/c", "${workspaceFolder}/deps/tensorflow/include/tensorflow/c/eager", - "${env.HOME}/.node-gyp/10.3.0/include/node" + "${env.HOME}/.node-gyp/10.15.3/include/node" ], "defines": [], "intelliSenseMode": "clang-x64", @@ -17,7 +17,7 @@ "${workspaceFolder}/deps/include", "${workspaceFolder}/deps/tensorflow/include/tensorflow/c", "${workspaceFolder}/deps/tensorflow/include/tensorflow/c/eager", - "${env.HOME}/.node-gyp/10.3.0/include/node" + "${env.HOME}/.node-gyp/10.15.3/include/node" ], "limitSymbolsToIncludedHeaders": true, "databaseFilename": "" diff --git a/binding/napi_auto_ref.h b/binding/napi_auto_ref.h new file mode 100644 index 00000000..7e3b2698 --- /dev/null +++ b/binding/napi_auto_ref.h @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2019 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +#ifndef TF_NODEJS_NAPI_AUTO_REF_H_ +#define TF_NODEJS_NAPI_AUTO_REF_H_ + +#include +#include "util.h" + +namespace tfnodejs { + +// Automatically cleans up a TF_Status instance. 
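+// Holds a napi_ref taken on a JS value: Init() calls napi_create_reference
+// with an initial refcount of 1 so the value (typically the typed array whose
+// buffer TensorFlow shares) cannot be garbage-collected while native code
+// still points at its memory, and Cleanup() releases the reference with
+// napi_delete_reference.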
+class NapiAutoRef { + public: + NapiAutoRef() : env_(nullptr), ref_(nullptr) {} + + napi_status Init(napi_env env, napi_value value) { + env_ = env; + return napi_create_reference(env_, value, 1, &ref_); + } + + napi_status Cleanup() { + if (!env_ || !ref_) { + fprintf(stderr, + "WARNING: Un-initialized reference attempted to cleanup!\n"); + return napi_invalid_arg; + } + + napi_status nstatus = napi_delete_reference(env_, ref_); + env_ = nullptr; + ref_ = nullptr; + return nstatus; + } + + virtual ~NapiAutoRef() { + if (env_) { + // TODO warn + fprintf(stderr, "WARNING: Non-cleaned up reference!\n"); + } + if (ref_) { + fprintf(stderr, "WARNING: Non-cleaned up reference!\n"); + // TODO warn + } + } + + private: + napi_env env_; + napi_ref ref_; +}; + +} // namespace tfnodejs + +#endif // TF_NODEJS_TF_AUTO_STATUS_H_ diff --git a/binding/tfjs_backend.cc b/binding/tfjs_backend.cc index 50ba3317..e610de3a 100644 --- a/binding/tfjs_backend.cc +++ b/binding/tfjs_backend.cc @@ -17,6 +17,7 @@ #include "tfjs_backend.h" +#include "napi_auto_ref.h" #include "tf_auto_tensor.h" #include "tfe_auto_op.h" #include "utils.h" @@ -32,6 +33,21 @@ namespace tfnodejs { // Used to hold strings beyond the lifetime of a JS call. static std::set ATTR_NAME_SET; +// Cleans up extra reference count for shared V8/TF tensor memory: +static void DeallocTensor(void *data, size_t len, void *arg) { + NapiAutoRef *auto_ref = static_cast(arg); + if (!auto_ref) { + fprintf(stderr, "WARNING: Invalid TF_Tensor deallocator arg!\n"); + return; + } + + napi_status nstatus = auto_ref->Cleanup(); + if (nstatus != napi_ok) { + fprintf(stderr, "WARNING: Invalid status cleaning up reference!\n"); + } + delete auto_ref; +} + // Creates a TFE_TensorHandle from a JS typed array. TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env, int64_t *shape, @@ -139,14 +155,27 @@ TFE_TensorHandle *CreateTFE_TensorHandleFromTypedArray(napi_env env, // and the byte size of the tensor dtype needs to be special-cased for int64. const size_t byte_size = dtype == TF_INT64 ? num_elements * width * 2 : num_elements * width; - TF_AutoTensor tensor( - TF_AllocateTensor(dtype, shape, shape_length, byte_size)); - memcpy(TF_TensorData(tensor.tensor), array_data, byte_size); + + // Sharing memory with V8 requires adding an additional refcount. When the + // Tensor is deleted, the ref count will be reduced. + NapiAutoRef *auto_ref = new NapiAutoRef(); + nstatus = auto_ref->Init(env, array_value); + if (nstatus != napi_ok) { + delete auto_ref; + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + } + + TF_AutoTensor tensor(TF_NewTensor(dtype, shape, shape_length, array_data, + byte_size, DeallocTensor, auto_ref)); TF_AutoStatus tf_status; TFE_TensorHandle *tfe_tensor_handle = TFE_NewTensorHandle(tensor.tensor, tf_status.status); - ENSURE_TF_OK_RETVAL(env, tf_status, nullptr); + if (TF_GetCode(tf_status.status) != TF_OK) { + delete auto_ref; + TFE_DeleteTensorHandle(tfe_tensor_handle); + ENSURE_TF_OK_RETVAL(env, tf_status, nullptr); + } return tfe_tensor_handle; } @@ -292,6 +321,7 @@ void CopyTFE_TensorHandleDataToTypedArray(napi_env env, // current value to the newly allocated NAPI buffer. 
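  // (This direction always copies: the tensor's buffer is owned by
  // TensorFlow, so its contents are duplicated into a JS-owned ArrayBuffer
  // rather than shared the way typed-array uploads now are.)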
memcpy(array_buffer_data, TF_TensorData(tensor.tensor), byte_length); + fprintf(stderr, "---> TensorHandle data to TypedArray\n"); nstatus = napi_create_typedarray(env, array_type, num_elements, array_buffer_value, 0, result); ENSURE_NAPI_OK(env, nstatus); diff --git a/tensor_alloc_test.js b/tensor_alloc_test.js new file mode 100644 index 00000000..afba67c9 --- /dev/null +++ b/tensor_alloc_test.js @@ -0,0 +1,14 @@ +const tf = require('./dist/index'); + +// Uncomment to use vanilla cpu backend. +// tf.setBackend('cpu'); +const N = 1000000; +const a = tf.zeros([1, 1]); +const start = process.hrtime(); +for (let i = 0; i < N; i++) { + tf.reshape(a, [1]); + // tf.slice(a, [0, 0], [1, 1]); +} +const end = process.hrtime(start); +const elapsed = end[0] * 1000 + end[1] / 1000000; +console.log(Math.round(N / elapsed) + ' ops/ms'); From afdfc5576ca4edace361aef50dbbdbce0b3eec44 Mon Sep 17 00:00:00 2001 From: Nick Kreeger Date: Thu, 28 Mar 2019 20:42:03 -0700 Subject: [PATCH 2/5] save --- binding/tfjs_backend.cc | 155 +++++++++++++++++++++++++---------- binding/tfjs_backend.h | 10 ++- src/debug_test.ts | 4 + src/nodejs_kernel_backend.ts | 29 +++---- src/tfjs_binding.ts | 2 +- src/tfjs_binding_test.ts | 20 +++-- tensor_alloc_test.js | 38 ++++++--- 7 files changed, 175 insertions(+), 83 deletions(-) create mode 100644 src/debug_test.ts diff --git a/binding/tfjs_backend.cc b/binding/tfjs_backend.cc index e610de3a..5cce2cd5 100644 --- a/binding/tfjs_backend.cc +++ b/binding/tfjs_backend.cc @@ -33,6 +33,12 @@ namespace tfnodejs { // Used to hold strings beyond the lifetime of a JS call. static std::set ATTR_NAME_SET; +// Binds a ... +struct TFE_TensorHandleNum { + TFE_TensorHandle *handle; + int32_t handle_num; +}; + // Cleans up extra reference count for shared V8/TF tensor memory: static void DeallocTensor(void *data, size_t len, void *arg) { NapiAutoRef *auto_ref = static_cast(arg); @@ -321,7 +327,6 @@ void CopyTFE_TensorHandleDataToTypedArray(napi_env env, // current value to the newly allocated NAPI buffer. memcpy(array_buffer_data, TF_TensorData(tensor.tensor), byte_length); - fprintf(stderr, "---> TensorHandle data to TypedArray\n"); nstatus = napi_create_typedarray(env, array_type, num_elements, array_buffer_value, 0, result); ENSURE_NAPI_OK(env, nstatus); @@ -689,7 +694,9 @@ void AssignOpAttr(napi_env env, TFE_Op *tfe_op, napi_value attr_value) { } } -TFJSBackend::TFJSBackend(napi_env env) : next_tensor_id_(0) { +TFJSBackend::TFJSBackend(napi_env env) + : tfe_handle_map_(new std::map()), + next_tensor_id_(0) { TF_AutoStatus tf_status; TFE_ContextOptions *tfe_options = TFE_NewContextOptions(); tfe_context_ = TFE_NewContext(tfe_options, tf_status.status); @@ -734,7 +741,7 @@ TFJSBackend::TFJSBackend(napi_env env) : next_tensor_id_(0) { } TFJSBackend::~TFJSBackend() { - for (auto &kv : tfe_handle_map_) { + for (auto &kv : *tfe_handle_map_) { TFE_DeleteTensorHandle(kv.second); } if (tfe_context_ != nullptr) { @@ -744,9 +751,86 @@ TFJSBackend::~TFJSBackend() { TFJSBackend *TFJSBackend::Create(napi_env env) { return new TFJSBackend(env); } -int32_t TFJSBackend::InsertHandle(TFE_TensorHandle *tfe_handle) { - return tfe_handle_map_.insert(std::make_pair(next_tensor_id_++, tfe_handle)) - .first->first; +static int32_t GC_COUNT = 0; + +// TODO - move to top of method... 
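+// Finalizer registered via napi_wrap below: Node-API invokes it once the
+// wrapped JS object has been garbage-collected. `data` carries the backend's
+// tensor-id -> TFE_TensorHandle map and `hint` the napi_value holding the
+// tensor id, so the handle can be deleted and its map entry erased here.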
+static void TFEHandlePairFinalize(napi_env env, void *data, void *hint) { + std::map *tfe_handle_map = + static_cast *>(data); + if (!tfe_handle_map) { + fprintf(stderr, "----> EXCEPTION HANDLE MAP IS NOT VALID!!!\n"); + return; + } + + napi_value tensor_id_value = static_cast(hint); + if (tensor_id_value == nullptr) { + fprintf(stderr, "----> EXCEPTION TENSOR ID IS NOT VALID!!!\n"); + return; + } + + // TODO - move cleanup to static method? + int32_t tensor_id; + napi_get_value_int32(env, tensor_id_value, &tensor_id); + + // TODO - cleanup/refactor this... Use heap ints??? fragmentation? + auto tensor_entry = tfe_handle_map->find(tensor_id); + if (tensor_entry == tfe_handle_map->end()) { + // NAPI_THROW_ERROR(env, + // "Delete called on a Tensor not referenced (tensor_id: + // %d)", tensor_id); + return; + } + + GC_COUNT++; + // if (GC_COUNT % 100 == 0) { + fprintf(stderr, "GC_COUNT: %d (TENSOR_ID: %d)\n", GC_COUNT, tensor_id); + // } + TFE_DeleteTensorHandle(tensor_entry->second); + tfe_handle_map->erase(tensor_entry); +} + +napi_status TFJSBackend::CreateTensorMetadataValue( + napi_env env, TFE_TensorHandle *tfe_handle, napi_value shape_value, + napi_value dtype_value, napi_value *tensor_metadata_value) { + napi_status nstatus; + + // First bump tensor index and insert into the handle map: + int32_t next_idx = next_tensor_id_++; // XXX heap? + tfe_handle_map_->insert(std::make_pair(next_idx, tfe_handle)); + + if (next_idx % 1000 == 0) { + fprintf(stderr, ":: next_id: %d\n", next_idx); + } + + // Next, create an object to represent the TensorMetadata class. + nstatus = napi_create_object(env, tensor_metadata_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + // Assign all values of the TensorMetadata class: + napi_value id_value; + nstatus = napi_create_int32(env, next_idx, &id_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + nstatus = + napi_set_named_property(env, *tensor_metadata_value, "id", id_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + nstatus = napi_set_named_property(env, *tensor_metadata_value, "shape", + shape_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + nstatus = napi_set_named_property(env, *tensor_metadata_value, "dtype", + dtype_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + // Next create an external JS object that can be tracked for GC. This object + // must be tracked to ensure the underlying TFE_TensorHandle data is cleanedup + // when Tensor reference is GC'd. 
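+  // napi_wrap associates native data with a JS object and registers a
+  // finalize callback for it: here the handle map is the native data,
+  // TFEHandlePairFinalize the callback, and id_value the finalize hint. The
+  // trailing nullptr just means no napi_ref to the wrapped object is
+  // returned to the caller.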
+ nstatus = napi_wrap(env, *tensor_metadata_value, tfe_handle_map_, + TFEHandlePairFinalize, id_value, nullptr); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + + return napi_ok; } napi_value TFJSBackend::CreateTensor(napi_env env, napi_value shape_value, @@ -786,18 +870,20 @@ napi_value TFJSBackend::CreateTensor(napi_env env, napi_value shape_value, tfe_handle = new_handle; } - napi_value output_tensor_id; - nstatus = napi_create_int32(env, InsertHandle(tfe_handle), &output_tensor_id); + napi_value tensor_metadata_value; + nstatus = CreateTensorMetadataValue(env, tfe_handle, shape_value, dtype_value, + &tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - return output_tensor_id; + + return tensor_metadata_value; } void TFJSBackend::DeleteTensor(napi_env env, napi_value tensor_id_value) { int32_t tensor_id; ENSURE_NAPI_OK(env, napi_get_value_int32(env, tensor_id_value, &tensor_id)); - auto tensor_entry = tfe_handle_map_.find(tensor_id); - if (tensor_entry == tfe_handle_map_.end()) { + auto tensor_entry = tfe_handle_map_->find(tensor_id); + if (tensor_entry == tfe_handle_map_->end()) { NAPI_THROW_ERROR(env, "Delete called on a Tensor not referenced (tensor_id: %d)", tensor_id); @@ -805,7 +891,7 @@ void TFJSBackend::DeleteTensor(napi_env env, napi_value tensor_id_value) { } TFE_DeleteTensorHandle(tensor_entry->second); - tfe_handle_map_.erase(tensor_entry); + tfe_handle_map_->erase(tensor_entry); } napi_value TFJSBackend::GetTensorData(napi_env env, @@ -814,8 +900,8 @@ napi_value TFJSBackend::GetTensorData(napi_env env, ENSURE_NAPI_OK_RETVAL( env, napi_get_value_int32(env, tensor_id_value, &tensor_id), nullptr); - auto tensor_entry = tfe_handle_map_.find(tensor_id); - if (tensor_entry == tfe_handle_map_.end()) { + auto tensor_entry = tfe_handle_map_->find(tensor_id); + if (tensor_entry == tfe_handle_map_->end()) { NAPI_THROW_ERROR( env, "Get data called on a Tensor not referenced (tensor_id: %d)", tensor_id); @@ -855,8 +941,8 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, nstatus = napi_get_value_int32(env, cur_input_id, &cur_input_tensor_id); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - auto input_tensor_entry = tfe_handle_map_.find(cur_input_tensor_id); - if (input_tensor_entry == tfe_handle_map_.end()) { + auto input_tensor_entry = tfe_handle_map_->find(cur_input_tensor_id); + if (input_tensor_entry == tfe_handle_map_->end()) { NAPI_THROW_ERROR(env, "Input Tensor ID not referenced (tensor_id: %d)", cur_input_tensor_id); return nullptr; @@ -899,42 +985,25 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, nstatus = napi_create_array_with_length(env, size, &output_tensor_infos); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + // TODO(kreeger): look at napi_adjust_external_memory for GC/heap usage in + // this block for (int32_t i = 0; i < num_outputs; i++) { - // Output tensor info object: - napi_value tensor_info_value; - nstatus = napi_create_object(env, &tensor_info_value); - ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - TFE_TensorHandle *handle = result_handles[i]; - // Output tensor ID: - napi_value output_tensor_id_value; - nstatus = - napi_create_int32(env, InsertHandle(handle), &output_tensor_id_value); - ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - - nstatus = napi_set_named_property(env, tensor_info_value, "id", - output_tensor_id_value); - ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - - // Output tensor shape: napi_value shape_value; - GetTFE_TensorHandleShape(env, handle, &shape_value); + 
GetTFE_TensorHandleShape(env, handle, &shape_value); // nstatus?? - nstatus = - napi_set_named_property(env, tensor_info_value, "shape", shape_value); - ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - - // Output tensor dtype: - napi_value type_value; - GetTFE_TensorHandleType(env, handle, &type_value); + napi_value dtype_value; + GetTFE_TensorHandleType(env, handle, &dtype_value); - nstatus = - napi_set_named_property(env, tensor_info_value, "dtype", type_value); + napi_value tensor_metadata_value; + nstatus = CreateTensorMetadataValue(env, handle, shape_value, dtype_value, + &tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); // Push into output array - nstatus = napi_set_element(env, output_tensor_infos, i, tensor_info_value); + nstatus = + napi_set_element(env, output_tensor_infos, i, tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); } diff --git a/binding/tfjs_backend.h b/binding/tfjs_backend.h index b8451b97..140d55ae 100644 --- a/binding/tfjs_backend.h +++ b/binding/tfjs_backend.h @@ -63,10 +63,16 @@ class TFJSBackend { TFJSBackend(napi_env env); ~TFJSBackend(); - int32_t InsertHandle(TFE_TensorHandle* tfe_handle); + // TODO - doc me. + napi_status CreateTensorMetadataValue(napi_env env, + TFE_TensorHandle* tfe_handle, + napi_value shape_value, + napi_value dtype_value, + napi_value* tensor_metadata_value); TFE_Context* tfe_context_; - std::map tfe_handle_map_; + // TODO (type-def this thing) + std::map* tfe_handle_map_; int32_t next_tensor_id_; std::string device_name; }; diff --git a/src/debug_test.ts b/src/debug_test.ts new file mode 100644 index 00000000..178a82ab --- /dev/null +++ b/src/debug_test.ts @@ -0,0 +1,4 @@ +import * as tf from './index'; + +const c = tf.add([1, 2], [3, 4]); +console.log(c.dataSync()); diff --git a/src/nodejs_kernel_backend.ts b/src/nodejs_kernel_backend.ts index 31ef5213..51954552 100644 --- a/src/nodejs_kernel_backend.ts +++ b/src/nodejs_kernel_backend.ts @@ -29,10 +29,8 @@ import {createTensorsTypeOpAttr, createTypeOpAttr, getTFDType} from './ops/op_ut import {TensorMetadata, TFEOpAttr, TFJSBinding} from './tfjs_binding'; type TensorInfo = { - shape: number[], - dtype: number, + metadata: TensorMetadata, values: Float32Array|Int32Array|Uint8Array, - id: number }; interface DataId {} @@ -77,12 +75,7 @@ export class NodeJSKernelBackend extends KernelBackend { private createOutputTensor(metadata: TensorMetadata): Tensor { const newId = {}; - this.tensorMap.set(newId, { - shape: metadata.shape, - dtype: metadata.dtype, - id: metadata.id, - values: null - }); + this.tensorMap.set(newId, {metadata, values: null}); let dtype: DataType; switch (metadata.dtype) { @@ -122,18 +115,19 @@ export class NodeJSKernelBackend extends KernelBackend { if (info.values != null) { // Values were delayed to write into the TensorHandle. Do that before // Op execution and clear stored values. - info.id = - this.binding.createTensor(info.shape, info.dtype, info.values); + info.metadata = this.binding.createTensor( + info.metadata.shape, info.metadata.dtype, info.values); info.values = null; this.tensorMap.set((tensors[i] as Tensor).dataId, info); } - ids.push(info.id); + ids.push(info.metadata.id); } else if (tensors[i] instanceof Int64Scalar) { // Then `tensors[i]` is a Int64Scalar, which we currently represent // using an `Int32Array`. 
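        // (Each int64 value occupies two 32-bit words, which is also why the
        // C++ side doubles the byte size for TF_INT64 tensors.)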
const value = (tensors[i] as Int64Scalar).valueArray; - const id = this.binding.createTensor([], this.binding.TF_INT64, value); - ids.push(id); + const metadata = + this.binding.createTensor([], this.binding.TF_INT64, value); + ids.push(metadata.id); } else { throw new Error(`Invalid Tensor type: ${typeof tensors[i]}`); } @@ -201,12 +195,12 @@ export class NodeJSKernelBackend extends KernelBackend { if (info.values != null) { return info.values; } else { - return this.binding.tensorDataSync(info.id); + return this.binding.tensorDataSync(info.metadata.id); } } disposeData(dataId: object): void { - const id = this.tensorMap.get(dataId).id; + const id = this.tensorMap.get(dataId).metadata.id; if (id != null && id >= 0) { this.binding.deleteTensor(id); } @@ -226,7 +220,8 @@ export class NodeJSKernelBackend extends KernelBackend { register(dataId: object, shape: number[], dtype: DataType): void { if (!this.tensorMap.has(dataId)) { this.tensorMap.set( - dataId, {shape, dtype: getTFDType(dtype), values: null, id: -1}); + dataId, + {metadata: {id: -1, shape, dtype: getTFDType(dtype)}, values: null}); } } diff --git a/src/tfjs_binding.ts b/src/tfjs_binding.ts index 1fcd4853..bc53bea1 100644 --- a/src/tfjs_binding.ts +++ b/src/tfjs_binding.ts @@ -34,7 +34,7 @@ export interface TFJSBinding { // Creates a tensor with the backend: createTensor( shape: number[], dtype: number, - buffer: Float32Array|Int32Array|Uint8Array): number; + buffer: Float32Array|Int32Array|Uint8Array): TensorMetadata; // Deletes a tensor with the backend: deleteTensor(tensorId: number): void; diff --git a/src/tfjs_binding_test.ts b/src/tfjs_binding_test.ts index 07005a29..daaa097b 100644 --- a/src/tfjs_binding_test.ts +++ b/src/tfjs_binding_test.ts @@ -68,10 +68,12 @@ describe('Exposes TF Version', () => { describe('tensor management', () => { it('Creates and deletes a valid tensor', () => { const values = new Int32Array([1, 2]); - const id = binding.createTensor([2], binding.TF_INT32, values); - expect(id).toBeDefined(); + const tensor = binding.createTensor([2], binding.TF_INT32, values); + expect(tensor).toBeDefined(); - binding.deleteTensor(id); + // TODO(kreeger): Check shape and dtype here + + binding.deleteTensor(tensor.id); }); it('throws exception when shape does not match data', () => { expect(() => { @@ -90,9 +92,9 @@ describe('tensor management', () => { it('works with 0-dim tensors', () => { // Reduce op (e.g 'Max') will produce a 0-dim TFE_Tensor. 
- const inputId = + const input = binding.createTensor([3], binding.TF_INT32, new Int32Array([1, 2, 3])); - const axesId = + const axes = binding.createTensor([1], binding.TF_INT32, new Int32Array([0])); const attrs = [ @@ -102,7 +104,7 @@ describe('tensor management', () => { ]; const outputMetadata = - binding.executeOp('Max', attrs, [inputId, axesId], 1); + binding.executeOp('Max', attrs, [input.id, axes.id], 1); expect(outputMetadata.length).toBe(1); expect(outputMetadata[0].id).toBeDefined(); @@ -120,11 +122,11 @@ describe('executeOp', () => { {name: 'transpose_b', type: binding.TF_ATTR_BOOL, value: false}, {name: 'T', type: binding.TF_ATTR_TYPE, value: binding.TF_FLOAT} ]; - const aId = binding.createTensor( + const a = binding.createTensor( [2, 2], binding.TF_FLOAT, new Float32Array([1, 2, 3, 4])); - const bId = binding.createTensor( + const b = binding.createTensor( [2, 2], binding.TF_FLOAT, new Float32Array([4, 3, 2, 1])); - const matMulInput = [aId, bId]; + const matMulInput = [a.id, b.id]; it('throws exception with invalid Op Name', () => { expect(() => { diff --git a/tensor_alloc_test.js b/tensor_alloc_test.js index afba67c9..4b0b363b 100644 --- a/tensor_alloc_test.js +++ b/tensor_alloc_test.js @@ -1,14 +1,30 @@ const tf = require('./dist/index'); -// Uncomment to use vanilla cpu backend. -// tf.setBackend('cpu'); -const N = 1000000; -const a = tf.zeros([1, 1]); -const start = process.hrtime(); -for (let i = 0; i < N; i++) { - tf.reshape(a, [1]); - // tf.slice(a, [0, 0], [1, 1]); +function sleep(ms) { + return new Promise(resolve => { + setTimeout(resolve, ms); + }); } -const end = process.hrtime(start); -const elapsed = end[0] * 1000 + end[1] / 1000000; -console.log(Math.round(N / elapsed) + ' ops/ms'); + +async function run() { + // console.log('waiting...'); + // await sleep(20000); + // console.log('running...'); + + // Uncomment to use vanilla cpu backend. + // tf.setBackend('cpu'); + const N = 1000000; + const a = tf.zeros([1, 1]); + const start = process.hrtime(); + for (let i = 0; i < N; i++) { + // tf.reshape(a, [1]); // This is bad - a goes GC + tf.slice(a, [0, 0], [1, 1]); + } + const end = process.hrtime(start); + const elapsed = end[0] * 1000 + end[1] / 1000000; + console.log(Math.round(N / elapsed) + ' ops/ms'); + + // await sleep(10000); +} + +run(); From ebcf4796f59aa24d09a0bfc43d943b54b29858f8 Mon Sep 17 00:00:00 2001 From: Nick Kreeger Date: Thu, 28 Mar 2019 20:47:10 -0700 Subject: [PATCH 3/5] save --- src/nodejs_kernel_backend.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/nodejs_kernel_backend.ts b/src/nodejs_kernel_backend.ts index 51954552..fc8890f5 100644 --- a/src/nodejs_kernel_backend.ts +++ b/src/nodejs_kernel_backend.ts @@ -73,6 +73,7 @@ export class NodeJSKernelBackend extends KernelBackend { // Creates a new Tensor and maps the dataId to the passed in ID. private createOutputTensor(metadata: TensorMetadata): Tensor { + // TODO(kreeger): Can we pass ID back from the backend??? const newId = {}; this.tensorMap.set(newId, {metadata, values: null}); @@ -115,6 +116,8 @@ export class NodeJSKernelBackend extends KernelBackend { if (info.values != null) { // Values were delayed to write into the TensorHandle. Do that before // Op execution and clear stored values. + + // TODO(kreeger): Pass object to bind since that is the GC'd key. 
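+        // (In other words, createTensor should eventually receive this
+        // Tensor's dataId so the native side can attach its GC finalizer to
+        // the same object the backend uses as its map key; the last patch in
+        // this series makes that change.)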
info.metadata = this.binding.createTensor( info.metadata.shape, info.metadata.dtype, info.values); info.values = null; From bd1844065a10b2cc2c2caeb1d534f058be9574ce Mon Sep 17 00:00:00 2001 From: Nick Kreeger Date: Thu, 28 Mar 2019 20:55:57 -0700 Subject: [PATCH 4/5] save --- src/nodejs_kernel_backend.ts | 1 + src/tfjs_binding.ts | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/src/nodejs_kernel_backend.ts b/src/nodejs_kernel_backend.ts index fc8890f5..ac8a3043 100644 --- a/src/nodejs_kernel_backend.ts +++ b/src/nodejs_kernel_backend.ts @@ -33,6 +33,7 @@ type TensorInfo = { values: Float32Array|Int32Array|Uint8Array, }; +// TODO(kreeger): Move to binding def. interface DataId {} export class NodeJSKernelBackend extends KernelBackend { diff --git a/src/tfjs_binding.ts b/src/tfjs_binding.ts index bc53bea1..41302d8e 100644 --- a/src/tfjs_binding.ts +++ b/src/tfjs_binding.ts @@ -32,17 +32,21 @@ export interface TFJSBinding { TFEOpAttr: typeof TFEOpAttr; // Creates a tensor with the backend: + // TODO(kreeger): Need to pass in the Tensor key object here as well. createTensor( shape: number[], dtype: number, buffer: Float32Array|Int32Array|Uint8Array): TensorMetadata; // Deletes a tensor with the backend: + // TODO(kreeger): Need to pass in the Tensor key as well?? deleteTensor(tensorId: number): void; // Reads data-sync from a tensor on the backend: tensorDataSync(tensorId: number): Float32Array|Int32Array|Uint8Array; // Executes an Op on the backend, returns an array of output TensorMetadata: + // TODO(kreeger): Output must be a 2D array - first keys, second values... + // (or map?) executeOp( opName: string, opAttrs: TFEOpAttr[], inputTensorIds: number[], numOutputs: number): TensorMetadata[]; From 33ac8a4774d17d61051f90d1d2dcae9b00245418 Mon Sep 17 00:00:00 2001 From: Nick Kreeger Date: Fri, 29 Mar 2019 20:31:19 -0700 Subject: [PATCH 5/5] save --- binding/tfjs_backend.cc | 54 +++++++++++++++++++++++++++--------- binding/tfjs_backend.h | 7 +++-- binding/tfjs_binding.cc | 17 ++++++------ src/nodejs_kernel_backend.ts | 41 ++++++++++++++------------- src/tfjs_binding.ts | 6 ++-- src/tfjs_binding_test.ts | 39 ++++++++++++++------------ tensor_alloc_test.js | 6 ++-- 7 files changed, 104 insertions(+), 66 deletions(-) diff --git a/binding/tfjs_backend.cc b/binding/tfjs_backend.cc index 5cce2cd5..17f7fa68 100644 --- a/binding/tfjs_backend.cc +++ b/binding/tfjs_backend.cc @@ -790,17 +790,18 @@ static void TFEHandlePairFinalize(napi_env env, void *data, void *hint) { } napi_status TFJSBackend::CreateTensorMetadataValue( - napi_env env, TFE_TensorHandle *tfe_handle, napi_value shape_value, - napi_value dtype_value, napi_value *tensor_metadata_value) { + napi_env env, TFE_TensorHandle *tfe_handle, napi_value key_value, + napi_value shape_value, napi_value dtype_value, + napi_value *tensor_metadata_value) { napi_status nstatus; // First bump tensor index and insert into the handle map: int32_t next_idx = next_tensor_id_++; // XXX heap? tfe_handle_map_->insert(std::make_pair(next_idx, tfe_handle)); - if (next_idx % 1000 == 0) { - fprintf(stderr, ":: next_id: %d\n", next_idx); - } + // if (next_idx % 1000 == 0) { + // fprintf(stderr, ":: next_id: %d\n", next_idx); + // } // Next, create an object to represent the TensorMetadata class. nstatus = napi_create_object(env, tensor_metadata_value); @@ -823,17 +824,20 @@ napi_status TFJSBackend::CreateTensorMetadataValue( dtype_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); + // TODO: update doc. 
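+  // In this revision the finalizer is attached to key_value, the plain JS
+  // object the kernel backend uses as the Tensor's dataId, rather than to the
+  // metadata object, so the TFE_TensorHandle is cleaned up when the dataId
+  // itself becomes unreachable.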
+ // TODO: consider napi_add_finalizer // Next create an external JS object that can be tracked for GC. This object // must be tracked to ensure the underlying TFE_TensorHandle data is cleanedup // when Tensor reference is GC'd. - nstatus = napi_wrap(env, *tensor_metadata_value, tfe_handle_map_, - TFEHandlePairFinalize, id_value, nullptr); + nstatus = napi_wrap(env, key_value, tfe_handle_map_, TFEHandlePairFinalize, + id_value, nullptr); ENSURE_NAPI_OK_RETVAL(env, nstatus, nstatus); return napi_ok; } -napi_value TFJSBackend::CreateTensor(napi_env env, napi_value shape_value, +napi_value TFJSBackend::CreateTensor(napi_env env, napi_value key_value, + napi_value shape_value, napi_value dtype_value, napi_value array_value) { napi_status nstatus; @@ -871,8 +875,8 @@ napi_value TFJSBackend::CreateTensor(napi_env env, napi_value shape_value, } napi_value tensor_metadata_value; - nstatus = CreateTensorMetadataValue(env, tfe_handle, shape_value, dtype_value, - &tensor_metadata_value); + nstatus = CreateTensorMetadataValue(env, tfe_handle, key_value, shape_value, + dtype_value, &tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); return tensor_metadata_value; @@ -981,6 +985,10 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, TFE_Execute(tfe_op.op, result_handles.data(), &size, tf_status.status); ENSURE_TF_OK_RETVAL(env, tf_status, nullptr); + napi_value output_tensor_keys; + nstatus = napi_create_array_with_length(env, size, &output_tensor_keys); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + napi_value output_tensor_infos; nstatus = napi_create_array_with_length(env, size, &output_tensor_infos); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); @@ -990,6 +998,10 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, for (int32_t i = 0; i < num_outputs; i++) { TFE_TensorHandle *handle = result_handles[i]; + napi_value key_value; + nstatus = napi_create_object(env, &key_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + napi_value shape_value; GetTFE_TensorHandleShape(env, handle, &shape_value); // nstatus?? @@ -997,17 +1009,33 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value, GetTFE_TensorHandleType(env, handle, &dtype_value); napi_value tensor_metadata_value; - nstatus = CreateTensorMetadataValue(env, handle, shape_value, dtype_value, - &tensor_metadata_value); + nstatus = CreateTensorMetadataValue(env, handle, key_value, shape_value, + dtype_value, &tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + // TODO update docs.. 
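+      // Each output is reported as a pair: key_value, a fresh JS object that
+      // carries the napi_wrap finalizer, and tensor_metadata_value with the
+      // id/shape/dtype. The two are returned in parallel `keys` and `tensors`
+      // arrays below.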
// Push into output array + nstatus = napi_set_element(env, output_tensor_keys, i, key_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + nstatus = napi_set_element(env, output_tensor_infos, i, tensor_metadata_value); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); } - return output_tensor_infos; + napi_value op_output_value; + nstatus = napi_create_object(env, &op_output_value); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + + nstatus = + napi_set_named_property(env, op_output_value, "keys", output_tensor_keys); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + + nstatus = napi_set_named_property(env, op_output_value, "tensors", + output_tensor_infos); + ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); + + return op_output_value; } } // namespace tfnodejs diff --git a/binding/tfjs_backend.h b/binding/tfjs_backend.h index 140d55ae..efc22d0c 100644 --- a/binding/tfjs_backend.h +++ b/binding/tfjs_backend.h @@ -34,11 +34,13 @@ class TFJSBackend { // Creates a new Tensor with given shape and data and returns an ID that // refernces the new Tensor. + // - key_value (TODO:kreeger - doc me.) // - shape_value (number[]) // - dtype_value (number) // - array_value (TypedArray|Array) - napi_value CreateTensor(napi_env env, napi_value shape_value, - napi_value dtype_value, napi_value array_value); + napi_value CreateTensor(napi_env env, napi_value key_value, + napi_value shape_value, napi_value dtype_value, + napi_value array_value); // Deletes a created Tensor. // - tensor_id_value (number) @@ -66,6 +68,7 @@ class TFJSBackend { // TODO - doc me. napi_status CreateTensorMetadataValue(napi_env env, TFE_TensorHandle* tfe_handle, + napi_value key_value, napi_value shape_value, napi_value dtype_value, napi_value* tensor_metadata_value); diff --git a/binding/tfjs_binding.cc b/binding/tfjs_binding.cc index bcf66850..c0b1e436 100644 --- a/binding/tfjs_binding.cc +++ b/binding/tfjs_binding.cc @@ -40,29 +40,30 @@ static napi_value CreateTensor(napi_env env, napi_callback_info info) { napi_status nstatus; // Create tensor takes 3 params: shape, dtype, typed-array/array: - size_t argc = 3; - napi_value args[3]; + size_t argc = 4; + napi_value args[4]; napi_value js_this; nstatus = napi_get_cb_info(env, info, &argc, args, &js_this, nullptr); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); - if (argc < 3) { + if (argc < 4) { NAPI_THROW_ERROR(env, "Invalid number of args passed to createTensor()"); return nullptr; } - ENSURE_VALUE_IS_ARRAY_RETVAL(env, args[0], nullptr); - ENSURE_VALUE_IS_NUMBER_RETVAL(env, args[1], nullptr); + ENSURE_VALUE_IS_OBJECT_RETVAL(env, args[0], nullptr); + ENSURE_VALUE_IS_ARRAY_RETVAL(env, args[1], nullptr); + ENSURE_VALUE_IS_NUMBER_RETVAL(env, args[2], nullptr); // The third array can either be a typed array or an array: bool is_typed_array; - nstatus = napi_is_typedarray(env, args[2], &is_typed_array); + nstatus = napi_is_typedarray(env, args[3], &is_typed_array); ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr); if (!is_typed_array) { - ENSURE_VALUE_IS_ARRAY_RETVAL(env, args[2], nullptr); + ENSURE_VALUE_IS_ARRAY_RETVAL(env, args[3], nullptr); } - return gBackend->CreateTensor(env, args[0], args[1], args[2]); + return gBackend->CreateTensor(env, args[0], args[1], args[2], args[3]); } static napi_value DeleteTensor(napi_env env, napi_callback_info info) { diff --git a/src/nodejs_kernel_backend.ts b/src/nodejs_kernel_backend.ts index ac8a3043..9cb41d85 100644 --- a/src/nodejs_kernel_backend.ts +++ b/src/nodejs_kernel_backend.ts @@ -26,16 +26,13 @@ import {isNullOrUndefined} from 'util'; 
import {Int64Scalar} from './int64_tensors'; // tslint:disable-next-line:max-line-length import {createTensorsTypeOpAttr, createTypeOpAttr, getTFDType} from './ops/op_utils'; -import {TensorMetadata, TFEOpAttr, TFJSBinding} from './tfjs_binding'; +import {DataId, TensorMetadata, TFEOpAttr, TFJSBinding} from './tfjs_binding'; type TensorInfo = { metadata: TensorMetadata, values: Float32Array|Int32Array|Uint8Array, }; -// TODO(kreeger): Move to binding def. -interface DataId {} - export class NodeJSKernelBackend extends KernelBackend { binding: TFJSBinding; isGPUPackage: boolean; @@ -73,11 +70,8 @@ export class NodeJSKernelBackend extends KernelBackend { } // Creates a new Tensor and maps the dataId to the passed in ID. - private createOutputTensor(metadata: TensorMetadata): Tensor { - // TODO(kreeger): Can we pass ID back from the backend??? - const newId = {}; - - this.tensorMap.set(newId, {metadata, values: null}); + private createOutputTensor(dataId: DataId, metadata: TensorMetadata): Tensor { + this.tensorMap.set(dataId, {metadata, values: null}); let dtype: DataType; switch (metadata.dtype) { @@ -104,7 +98,7 @@ export class NodeJSKernelBackend extends KernelBackend { default: throw new Error(`Unknown dtype enum ${metadata.dtype}`); } - return Tensor.make(metadata.shape, {dataId: newId}, dtype); + return Tensor.make(metadata.shape, {dataId}, dtype); } // Prepares Tensor instances for Op execution. @@ -112,25 +106,27 @@ export class NodeJSKernelBackend extends KernelBackend { const ids: number[] = []; for (let i = 0; i < tensors.length; i++) { if (tensors[i] instanceof Tensor) { - const info = this.tensorMap.get((tensors[i] as Tensor).dataId); + const curTensor = tensors[i] as Tensor; + const info = this.tensorMap.get(curTensor.dataId); // TODO - what about ID in this case? Handle in write()?? if (info.values != null) { // Values were delayed to write into the TensorHandle. Do that before // Op execution and clear stored values. - - // TODO(kreeger): Pass object to bind since that is the GC'd key. info.metadata = this.binding.createTensor( - info.metadata.shape, info.metadata.dtype, info.values); + curTensor.dataId, info.metadata.shape, info.metadata.dtype, + info.values); info.values = null; - this.tensorMap.set((tensors[i] as Tensor).dataId, info); + this.tensorMap.set(curTensor.dataId, info); } ids.push(info.metadata.id); } else if (tensors[i] instanceof Int64Scalar) { // Then `tensors[i]` is a Int64Scalar, which we currently represent // using an `Int32Array`. const value = (tensors[i] as Int64Scalar).valueArray; + + // TODO(kreeger): How does this case work? 
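+        // (The key passed below is a throw-away {} literal; unlike regular
+        // tensors, no JS-side dataId keeps the int64 scalar's handle alive.)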
const metadata = - this.binding.createTensor([], this.binding.TF_INT64, value); + this.binding.createTensor({}, [], this.binding.TF_INT64, value); ids.push(metadata.id); } else { throw new Error(`Invalid Tensor type: ${typeof tensors[i]}`); @@ -164,9 +160,9 @@ export class NodeJSKernelBackend extends KernelBackend { */ executeSingleOutput(name: string, opAttrs: TFEOpAttr[], inputs: Tensor[]): Tensor { - const outputMetadata = this.binding.executeOp( + const opResult = this.binding.executeOp( name, opAttrs, this.getInputTensorIds(inputs), 1); - return this.createOutputTensor(outputMetadata[0]); + return this.createOutputTensor(opResult.keys[0], opResult.tensors[0]); } /** @@ -180,9 +176,14 @@ export class NodeJSKernelBackend extends KernelBackend { executeMultipleOutputs( name: string, opAttrs: TFEOpAttr[], inputs: Tensor[], numOutputs: number): Tensor[] { - const outputMetadata = this.binding.executeOp( + const results = [] as Tensor[]; + const opOutputs = this.binding.executeOp( name, opAttrs, this.getInputTensorIds(inputs), numOutputs); - return outputMetadata.map(m => this.createOutputTensor(m)); + for (let i = 0; i < opOutputs.keys.length; i++) { + results.push( + this.createOutputTensor(opOutputs.keys[i], opOutputs.tensors[i])); + } + return results; } dispose(): void {} diff --git a/src/tfjs_binding.ts b/src/tfjs_binding.ts index 41302d8e..b46817a4 100644 --- a/src/tfjs_binding.ts +++ b/src/tfjs_binding.ts @@ -15,6 +15,8 @@ * ============================================================================= */ +export interface DataId {} + export declare class TensorMetadata { id: number; shape: number[]; @@ -34,7 +36,7 @@ export interface TFJSBinding { // Creates a tensor with the backend: // TODO(kreeger): Need to pass in the Tensor key object here as well. createTensor( - shape: number[], dtype: number, + dataId: DataId, shape: number[], dtype: number, buffer: Float32Array|Int32Array|Uint8Array): TensorMetadata; // Deletes a tensor with the backend: @@ -49,7 +51,7 @@ export interface TFJSBinding { // (or map?) 
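  // Returns parallel arrays: keys[i] is the plain object the native side has
  // wrapped with a GC finalizer (meant to become the output Tensor's dataId)
  // and tensors[i] is the matching TensorMetadata.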
executeOp( opName: string, opAttrs: TFEOpAttr[], inputTensorIds: number[], - numOutputs: number): TensorMetadata[]; + numOutputs: number): {keys: DataId[], tensors: TensorMetadata[]}; // TF Types TF_FLOAT: number; diff --git a/src/tfjs_binding_test.ts b/src/tfjs_binding_test.ts index daaa097b..8f60953d 100644 --- a/src/tfjs_binding_test.ts +++ b/src/tfjs_binding_test.ts @@ -68,7 +68,7 @@ describe('Exposes TF Version', () => { describe('tensor management', () => { it('Creates and deletes a valid tensor', () => { const values = new Int32Array([1, 2]); - const tensor = binding.createTensor([2], binding.TF_INT32, values); + const tensor = binding.createTensor({}, [2], binding.TF_INT32, values); expect(tensor).toBeDefined(); // TODO(kreeger): Check shape and dtype here @@ -77,25 +77,27 @@ describe('tensor management', () => { }); it('throws exception when shape does not match data', () => { expect(() => { - binding.createTensor([2], binding.TF_INT32, new Int32Array([1, 2, 3])); + binding.createTensor( + {}, [2], binding.TF_INT32, new Int32Array([1, 2, 3])); }).toThrowError(); expect(() => { - binding.createTensor([4], binding.TF_INT32, new Int32Array([1, 2, 3])); + binding.createTensor( + {}, [4], binding.TF_INT32, new Int32Array([1, 2, 3])); }).toThrowError(); }); it('throws exception with invalid dtype', () => { expect(() => { // tslint:disable-next-line:no-unused-expression - binding.createTensor([1], 1000, new Int32Array([1])); + binding.createTensor({}, [1], 1000, new Int32Array([1])); }).toThrowError(); }); it('works with 0-dim tensors', () => { // Reduce op (e.g 'Max') will produce a 0-dim TFE_Tensor. - const input = - binding.createTensor([3], binding.TF_INT32, new Int32Array([1, 2, 3])); + const input = binding.createTensor( + {}, [3], binding.TF_INT32, new Int32Array([1, 2, 3])); const axes = - binding.createTensor([1], binding.TF_INT32, new Int32Array([0])); + binding.createTensor({}, [1], binding.TF_INT32, new Int32Array([0])); const attrs = [ {name: 'keep_dims', type: binding.TF_ATTR_BOOL, value: false}, @@ -103,14 +105,14 @@ describe('tensor management', () => { {name: 'Tidx', type: binding.TF_ATTR_TYPE, value: binding.TF_INT32} ]; - const outputMetadata = - binding.executeOp('Max', attrs, [input.id, axes.id], 1); - expect(outputMetadata.length).toBe(1); + const output = binding.executeOp('Max', attrs, [input.id, axes.id], 1); + expect(output.tensors.length).toBe(1); - expect(outputMetadata[0].id).toBeDefined(); - expect(outputMetadata[0].shape).toEqual([]); - expect(outputMetadata[0].dtype).toEqual(binding.TF_INT32); - expect(binding.tensorDataSync(outputMetadata[0].id)) + // TODO test more + expect(output.tensors[0].id).toBeDefined(); + expect(output.tensors[0].shape).toEqual([]); + expect(output.tensors[0].dtype).toEqual(binding.TF_INT32); + expect(binding.tensorDataSync(output.tensors[0].id)) .toEqual(new Int32Array([3])); }); }); @@ -123,9 +125,9 @@ describe('executeOp', () => { {name: 'T', type: binding.TF_ATTR_TYPE, value: binding.TF_FLOAT} ]; const a = binding.createTensor( - [2, 2], binding.TF_FLOAT, new Float32Array([1, 2, 3, 4])); + {}, [2, 2], binding.TF_FLOAT, new Float32Array([1, 2, 3, 4])); const b = binding.createTensor( - [2, 2], binding.TF_FLOAT, new Float32Array([4, 3, 2, 1])); + {}, [2, 2], binding.TF_FLOAT, new Float32Array([4, 3, 2, 1])); const matMulInput = [a.id, b.id]; it('throws exception with invalid Op Name', () => { @@ -296,8 +298,9 @@ describe('executeOp', () => { }).toThrowError(); }); it('should work for matmul', () => { - const output = 
binding.executeOp(name, matMulOpAttrs, matMulInput, 1); - expect(binding.tensorDataSync(output[0].id)).toEqual(new Float32Array([ + const tensor = + binding.executeOp(name, matMulOpAttrs, matMulInput, 1).tensors[0]; + expect(binding.tensorDataSync(tensor.id)).toEqual(new Float32Array([ 8, 5, 20, 13 ])); }); diff --git a/tensor_alloc_test.js b/tensor_alloc_test.js index 4b0b363b..4d44db9f 100644 --- a/tensor_alloc_test.js +++ b/tensor_alloc_test.js @@ -7,9 +7,9 @@ function sleep(ms) { } async function run() { - // console.log('waiting...'); - // await sleep(20000); - // console.log('running...'); + console.log('waiting...'); + await sleep(20000); + console.log('running...'); // Uncomment to use vanilla cpu backend. // tf.setBackend('cpu');
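
For readers who want the Node-API mechanism in isolation: the garbage-collection-driven cleanup these patches build boils down to the pattern sketched below. This is a minimal, illustrative sketch rather than code from the patch; FreeNativeState, MakeTrackedKey, and the malloc'd buffer are made-up stand-ins, and error handling is reduced to early returns.

#include <node_api.h>
#include <cstdlib>

// Finalizer: Node-API calls this after the wrapped JS object has been
// garbage-collected. `data` is whatever native pointer was handed to
// napi_wrap (in the patches above, the tensor-id -> handle map).
static void FreeNativeState(napi_env /*env*/, void* data, void* /*hint*/) {
  std::free(data);
}

// Returns a plain JS object whose lifetime now controls a native allocation,
// the same way the backend's dataId key controls a TFE_TensorHandle.
static napi_value MakeTrackedKey(napi_env env, napi_callback_info /*info*/) {
  napi_value key;
  if (napi_create_object(env, &key) != napi_ok) return nullptr;

  void* native_state = std::malloc(256);  // stand-in for a TFE_TensorHandle
  napi_status status = napi_wrap(env, key, native_state, FreeNativeState,
                                 /*finalize_hint=*/nullptr, /*result=*/nullptr);
  if (status != napi_ok) {
    std::free(native_state);
    return nullptr;
  }
  // Once `key` is unreachable from JS, FreeNativeState runs and the native
  // allocation is released; no explicit deleteTensor() call is needed.
  return key;
}

TF_NewTensor's deallocator callback plays the analogous role on the upload path in patch 1: the NapiAutoRef passed as its argument keeps the source TypedArray pinned until TensorFlow drops the tensor, at which point DeallocTensor releases the reference.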