{ "version": 3, "sources": ["../src/helpers.ts", "../src/config.ts", "../node_modules/.pnpm/long@4.0.0/node_modules/long/src/long.js", "../node_modules/.pnpm/node-fetch@2.6.2/node_modules/node-fetch/browser.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/alea.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xor128.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xorwow.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xorshift7.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xor4096.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/tychei.js", "(disabled):crypto", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/seedrandom.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/index.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/alea.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xor128.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xorwow.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xorshift7.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xor4096.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/tychei.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/seedrandom.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/index.js", "../node_modules/.pnpm/string_decoder@1.1.1/node_modules/string_decoder/lib/string_decoder.js", "(disabled):path", "(disabled):worker_threads", "(disabled):perf_hooks", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm-threaded-simd.js", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm.js", "../node_modules/.pnpm/tfjs-core/src/backends/backend.ts", "../node_modules/.pnpm/tfjs-core/src/util_base.ts", "../node_modules/.pnpm/tfjs-core/src/log.ts", "../node_modules/.pnpm/tfjs-core/src/environment.ts", "../node_modules/.pnpm/tfjs-core/src/global_util.ts", "../node_modules/.pnpm/tfjs-core/src/kernel_names.ts", "../node_modules/.pnpm/tfjs-core/src/kernel_registry.ts", "../node_modules/.pnpm/tfjs-core/src/util.ts", "../node_modules/.pnpm/tfjs-core/src/hash_util.ts", "../node_modules/.pnpm/tfjs-core/src/profiler.ts", "../node_modules/.pnpm/tfjs-core/src/tape.ts", "../node_modules/.pnpm/tfjs-core/src/tensor_format.ts", "../node_modules/.pnpm/tfjs-core/src/tensor.ts", "../node_modules/.pnpm/tfjs-core/src/tensor_util.ts", "../node_modules/.pnpm/tfjs-core/src/types.ts", "../node_modules/.pnpm/tfjs-core/src/engine.ts", "../node_modules/.pnpm/tfjs-core/src/device_util.ts", "../node_modules/.pnpm/tfjs-core/src/flags.ts", "../node_modules/.pnpm/tfjs-core/src/tensor_util_env.ts", "../node_modules/.pnpm/tfjs-core/src/ops/operation.ts", "../node_modules/.pnpm/tfjs-core/src/ops/complex.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor_ops_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor.ts", "../node_modules/.pnpm/tfjs-core/src/io/types.ts", "../node_modules/.pnpm/tfjs-core/src/io/io_utils.ts", "../node_modules/.pnpm/tfjs-core/src/io/router_registry.ts", "../node_modules/.pnpm/tfjs-core/src/io/indexed_db.ts", "../node_modules/.pnpm/tfjs-core/src/io/local_storage.ts", 
"../node_modules/.pnpm/tfjs-core/src/io/model_management.ts", "../node_modules/.pnpm/tfjs-core/src/platforms/platform_browser.ts", "../node_modules/.pnpm/tfjs-core/src/platforms/platform_node.ts", "../node_modules/.pnpm/tfjs-core/src/ops/buffer.ts", "../node_modules/.pnpm/tfjs-core/src/ops/cast.ts", "../node_modules/.pnpm/tfjs-core/src/ops/clone.ts", "../node_modules/.pnpm/tfjs-core/src/ops/print.ts", "../node_modules/.pnpm/tfjs-core/src/base_side_effects.ts", "../node_modules/.pnpm/tfjs-core/src/io/io.ts", "../node_modules/.pnpm/tfjs-core/src/io/browser_files.ts", "../node_modules/.pnpm/tfjs-core/src/io/progress.ts", "../node_modules/.pnpm/tfjs-core/src/io/weights_loader.ts", "../node_modules/.pnpm/tfjs-core/src/io/http.ts", "../node_modules/.pnpm/tfjs-core/src/io/passthrough.ts", "../node_modules/.pnpm/tfjs-core/src/math.ts", "../node_modules/.pnpm/tfjs-core/src/ops/mat_mul.ts", "../node_modules/.pnpm/tfjs-core/src/ops/one_hot.ts", "../node_modules/.pnpm/tfjs-core/src/ops/transpose.ts", "../node_modules/.pnpm/tfjs-core/src/ops/confusion_matrix.ts", "../node_modules/.pnpm/tfjs-core/src/ops/browser.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/gather_nd_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/scatter_nd_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice_util.ts", "../node_modules/.pnpm/tfjs-core/src/serialization.ts", "../node_modules/.pnpm/tfjs-core/src/test_util.ts", "../node_modules/.pnpm/tfjs-core/src/version.ts", "../node_modules/.pnpm/tfjs-core/src/globals.ts", "../node_modules/.pnpm/tfjs-core/src/ops/add.ts", "../node_modules/.pnpm/tfjs-core/src/ops/floorDiv.ts", "../node_modules/.pnpm/tfjs-core/src/ops/div.ts", "../node_modules/.pnpm/tfjs-core/src/ops/mul.ts", "../node_modules/.pnpm/tfjs-core/src/ops/abs.ts", "../node_modules/.pnpm/tfjs-core/src/ops/acos.ts", "../node_modules/.pnpm/tfjs-core/src/ops/acosh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/add_n.ts", "../node_modules/.pnpm/tfjs-core/src/ops/all.ts", "../node_modules/.pnpm/tfjs-core/src/ops/any.ts", "../node_modules/.pnpm/tfjs-core/src/ops/arg_max.ts", "../node_modules/.pnpm/tfjs-core/src/ops/arg_min.ts", "../node_modules/.pnpm/tfjs-core/src/ops/asin.ts", "../node_modules/.pnpm/tfjs-core/src/ops/asinh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/atan.ts", "../node_modules/.pnpm/tfjs-core/src/ops/atan2.ts", "../node_modules/.pnpm/tfjs-core/src/ops/atanh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reshape.ts", "../node_modules/.pnpm/tfjs-core/src/ops/avg_pool.ts", "../node_modules/.pnpm/tfjs-core/src/ops/avg_pool_3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sigmoid.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tanh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/basic_lstm_cell.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batch_to_space_nd.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batchnorm_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batchnorm.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batchnorm2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batchnorm3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/batchnorm4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/bincount.ts", "../node_modules/.pnpm/tfjs-core/src/ops/broadcast_args.ts", "../node_modules/.pnpm/tfjs-core/src/ops/broadcast_to.ts", "../node_modules/.pnpm/tfjs-core/src/ops/ceil.ts", 
"../node_modules/.pnpm/tfjs-core/src/ops/clip_by_value.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat_1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat_2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat_3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat_4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv2d_backprop_input.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv2d_transpose.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv3d_backprop_input.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv3d_transpose.ts", "../node_modules/.pnpm/tfjs-core/src/ops/cos.ts", "../node_modules/.pnpm/tfjs-core/src/ops/cosh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/cumsum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/dense_bincount.ts", "../node_modules/.pnpm/tfjs-core/src/ops/depth_to_space.ts", "../node_modules/.pnpm/tfjs-core/src/ops/depthwise_conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/diag.ts", "../node_modules/.pnpm/tfjs-core/src/ops/dilation2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/broadcast_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/equal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/where.ts", "../node_modules/.pnpm/tfjs-core/src/ops/zeros_like.ts", "../node_modules/.pnpm/tfjs-core/src/ops/div_no_nan.ts", "../node_modules/.pnpm/tfjs-core/src/ops/dot.ts", "../node_modules/.pnpm/tfjs-core/src/ops/einsum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/elu.ts", "../node_modules/.pnpm/tfjs-core/src/ops/erf.ts", "../node_modules/.pnpm/tfjs-core/src/ops/exp.ts", "../node_modules/.pnpm/tfjs-core/src/ops/expand_dims.ts", "../node_modules/.pnpm/tfjs-core/src/ops/expm1.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tile.ts", "../node_modules/.pnpm/tfjs-core/src/ops/eye.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fill.ts", "../node_modules/.pnpm/tfjs-core/src/ops/floor.ts", "../node_modules/.pnpm/tfjs-core/src/ops/gather.ts", "../node_modules/.pnpm/tfjs-core/src/ops/greater.ts", "../node_modules/.pnpm/tfjs-core/src/ops/greater_equal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/imag.ts", "../node_modules/.pnpm/tfjs-core/src/ops/is_finite.ts", "../node_modules/.pnpm/tfjs-core/src/ops/is_inf.ts", "../node_modules/.pnpm/tfjs-core/src/ops/is_nan.ts", "../node_modules/.pnpm/tfjs-core/src/ops/leaky_relu.ts", "../node_modules/.pnpm/tfjs-core/src/ops/less.ts", "../node_modules/.pnpm/tfjs-core/src/ops/less_equal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/linspace.ts", "../node_modules/.pnpm/tfjs-core/src/ops/local_response_normalization.ts", "../node_modules/.pnpm/tfjs-core/src/ops/log.ts", "../node_modules/.pnpm/tfjs-core/src/ops/log1p.ts", "../node_modules/.pnpm/tfjs-core/src/gradients.ts", "../node_modules/.pnpm/tfjs-core/src/ops/neg.ts", "../node_modules/.pnpm/tfjs-core/src/ops/softplus.ts", "../node_modules/.pnpm/tfjs-core/src/ops/log_sigmoid.ts", "../node_modules/.pnpm/tfjs-core/src/ops/max.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sub.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/log_softmax.ts", "../node_modules/.pnpm/tfjs-core/src/ops/axis_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/log_sum_exp.ts", "../node_modules/.pnpm/tfjs-core/src/ops/logical_and.ts", "../node_modules/.pnpm/tfjs-core/src/ops/logical_not.ts", "../node_modules/.pnpm/tfjs-core/src/ops/logical_or.ts", "../node_modules/.pnpm/tfjs-core/src/ops/logical_xor.ts", 
"../node_modules/.pnpm/tfjs-core/src/ops/max_pool.ts", "../node_modules/.pnpm/tfjs-core/src/ops/max_pool_3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/max_pool_with_argmax.ts", "../node_modules/.pnpm/tfjs-core/src/ops/maximum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/mean.ts", "../node_modules/.pnpm/tfjs-core/src/ops/zeros.ts", "../node_modules/.pnpm/tfjs-core/src/ops/ones.ts", "../node_modules/.pnpm/tfjs-core/src/ops/meshgrid.ts", "../node_modules/.pnpm/tfjs-core/src/ops/min.ts", "../node_modules/.pnpm/tfjs-core/src/ops/minimum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/mirror_pad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/mod.ts", "../node_modules/.pnpm/tfjs-core/src/ops/square.ts", "../node_modules/.pnpm/tfjs-core/src/ops/moments.ts", "../node_modules/.pnpm/tfjs-core/src/ops/multi_rnn_cell.ts", "../node_modules/.pnpm/tfjs-core/src/ops/multinomial.ts", "../node_modules/.pnpm/tfjs-core/src/ops/not_equal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/ones_like.ts", "../node_modules/.pnpm/tfjs-core/src/ops/outer_product.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pad1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pad2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pad3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pad4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/space_to_batch_nd.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pool.ts", "../node_modules/.pnpm/tfjs-core/src/ops/pow.ts", "../node_modules/.pnpm/tfjs-core/src/ops/prelu.ts", "../node_modules/.pnpm/tfjs-core/src/ops/prod.ts", "../node_modules/.pnpm/tfjs-core/src/ops/rand.ts", "../node_modules/.pnpm/tfjs-core/src/ops/rand_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/random_gamma.ts", "../node_modules/.pnpm/tfjs-core/src/ops/random_normal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/random_uniform.ts", "../node_modules/.pnpm/tfjs-core/src/ops/range.ts", "../node_modules/.pnpm/tfjs-core/src/ops/real.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reciprocal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/relu.ts", "../node_modules/.pnpm/tfjs-core/src/ops/relu6.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reverse.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reverse_1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reverse_2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reverse_3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reverse_4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/round.ts", "../node_modules/.pnpm/tfjs-core/src/ops/rsqrt.ts", "../node_modules/.pnpm/tfjs-core/src/ops/scalar.ts", "../node_modules/.pnpm/tfjs-core/src/ops/selu.ts", "../node_modules/.pnpm/tfjs-core/src/ops/separable_conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/setdiff1d_async.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sign.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sin.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sinh.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice3d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/slice4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/softmax.ts", "../node_modules/.pnpm/tfjs-core/src/ops/spectral/fft.ts", "../node_modules/.pnpm/tfjs-core/src/ops/spectral/ifft.ts", "../node_modules/.pnpm/tfjs-core/src/ops/spectral/irfft.ts", "../node_modules/.pnpm/tfjs-core/src/ops/split.ts", "../node_modules/.pnpm/tfjs-core/src/ops/spectral/rfft.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sqrt.ts", 
"../node_modules/.pnpm/tfjs-core/src/ops/squared_difference.ts", "../node_modules/.pnpm/tfjs-core/src/ops/squeeze.ts", "../node_modules/.pnpm/tfjs-core/src/ops/stack.ts", "../node_modules/.pnpm/tfjs-core/src/ops/step.ts", "../node_modules/.pnpm/tfjs-core/src/ops/strided_slice.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tan.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor1d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor4d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor5d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/tensor6d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/topk.ts", "../node_modules/.pnpm/tfjs-core/src/ops/truncated_normal.ts", "../node_modules/.pnpm/tfjs-core/src/ops/unique.ts", "../node_modules/.pnpm/tfjs-core/src/ops/unsorted_segment_sum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/unstack.ts", "../node_modules/.pnpm/tfjs-core/src/ops/variable.ts", "../node_modules/.pnpm/tfjs-core/src/backends/where_impl.ts", "../node_modules/.pnpm/tfjs-core/src/ops/where_async.ts", "../node_modules/.pnpm/tfjs-core/src/ops/boolean_mask.ts", "../node_modules/.pnpm/tfjs-core/src/ops/norm.ts", "../node_modules/.pnpm/tfjs-core/src/ops/moving_average.ts", "../node_modules/.pnpm/tfjs-core/src/ops/scatter_nd.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse_to_dense_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse_to_dense.ts", "../node_modules/.pnpm/tfjs-core/src/ops/gather_nd.ts", "../node_modules/.pnpm/tfjs-core/src/ops/dropout_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/dropout.ts", "../node_modules/.pnpm/tfjs-core/src/ops/signal_ops_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/in_top_k.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fused_ops.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv2d_backprop_filter.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fused_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fused/conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/depthwise_conv2d_native_backprop_filter.ts", "../node_modules/.pnpm/tfjs-core/src/ops/depthwise_conv2d_native_backprop_input.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fused/depthwise_conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/ops/fused/mat_mul.ts", "../node_modules/.pnpm/tfjs-core/src/ops/signal/hamming_window.ts", "../node_modules/.pnpm/tfjs-core/src/ops/signal/hann_window.ts", "../node_modules/.pnpm/tfjs-core/src/ops/signal/frame.ts", "../node_modules/.pnpm/tfjs-core/src/ops/signal/stft.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/crop_and_resize.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/flip_left_right.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/grayscale_to_rgb.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/rotate_with_offset.ts", "../node_modules/.pnpm/tfjs-core/src/ops/nonmax_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression.ts", "../node_modules/.pnpm/tfjs-core/src/backends/non_max_suppression_util.ts", "../node_modules/.pnpm/tfjs-core/src/backends/non_max_suppression_impl.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression_async.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression_with_score.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression_with_score_async.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression_padded.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/non_max_suppression_padded_async.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/resize_bilinear.ts", 
"../node_modules/.pnpm/tfjs-core/src/ops/image/resize_nearest_neighbor.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/threshold.ts", "../node_modules/.pnpm/tfjs-core/src/ops/image/transform.ts", "../node_modules/.pnpm/tfjs-core/src/ops/linalg/band_part.ts", "../node_modules/.pnpm/tfjs-core/src/ops/linalg/gram_schmidt.ts", "../node_modules/.pnpm/tfjs-core/src/ops/linalg/qr.ts", "../node_modules/.pnpm/tfjs-core/src/ops/loss_ops_utils.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/compute_weighted_loss.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/absolute_difference.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/cosine_distance.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/hinge_loss.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/huber_loss.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/log_loss.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/mean_squared_error.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/sigmoid_cross_entropy.ts", "../node_modules/.pnpm/tfjs-core/src/ops/losses/softmax_cross_entropy.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse/sparse_fill_empty_rows.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse/sparse_reshape.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse/sparse_segment_mean.ts", "../node_modules/.pnpm/tfjs-core/src/ops/sparse/sparse_segment_sum.ts", "../node_modules/.pnpm/tfjs-core/src/ops/string/string_n_grams.ts", "../node_modules/.pnpm/tfjs-core/src/ops/string/string_split.ts", "../node_modules/.pnpm/tfjs-core/src/ops/string/string_to_hash_bucket_fast.ts", "../node_modules/.pnpm/tfjs-core/src/ops/ops.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/adadelta_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/adagrad_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/adam_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/adamax_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/sgd_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/momentum_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/rmsprop_optimizer.ts", "../node_modules/.pnpm/tfjs-core/src/optimizers/optimizer_constructors.ts", "../node_modules/.pnpm/tfjs-core/src/train.ts", "../node_modules/.pnpm/tfjs-core/src/browser_util.ts", "../node_modules/.pnpm/tfjs-core/src/backends/backend_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/concat_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/reduce_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/rotate_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/array_ops_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/selu_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/erf_util.ts", "../node_modules/.pnpm/tfjs-core/src/backends/complex_util.ts", "../node_modules/.pnpm/tfjs-core/src/backends/einsum_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/split_util.ts", "../node_modules/.pnpm/tfjs-core/src/ops/segment_util.ts", "../node_modules/.pnpm/tfjs-core/src/backends/kernel_impls.ts", "../node_modules/.pnpm/tfjs-core/src/base.ts", "../node_modules/.pnpm/tfjs-core/src/index.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Abs_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Acos_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Acosh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Add_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/AddN_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ArgMax_grad.ts", 
"../node_modules/.pnpm/tfjs-core/src/gradients/ArgMin_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Asin_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Asinh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Atan2_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Atan_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Atanh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/avg_pool_3d_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/AvgPool3D_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/avg_pool_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/AvgPool_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/BatchMatMul_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/BatchToSpaceND_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/BroadcastTo_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Cast_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Ceil_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ClipByValue_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ComplexAbs_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Concat_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Conv2D_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Conv2DBackpropInput_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/conv3d_backprop_filter.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Conv3D_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Cos_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Cosh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Cumsum_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/DepthwiseConv2dNative_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Dilation2D_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Elu_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Erf_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Exp_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ExpandDims_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Expm1_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Floor_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/FloorDiv_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/FusedBatchNorm_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/GatherV2_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/GreaterEqual_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Identity_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/IsFinite_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/IsInf_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/IsNan_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/LeakyRelu_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Log1p_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Log_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/LogSoftmax_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/local_response_normalization_backprop.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/LRN_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/min_max_grad_util.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Max_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Maximum_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/max_pool_3d_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/MaxPool3D_grad.ts", "../node_modules/.pnpm/tfjs-core/src/ops/max_pool_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/MaxPool_grad.ts", 
"../node_modules/.pnpm/tfjs-core/src/gradients/Mean_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Min_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Minimum_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/MirrorPad_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Mod_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Multiply_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Neg_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/OneHot_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/OnesLike_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Pack_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/PadV2_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Pow_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Prelu_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/RealDiv_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Reciprocal_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Relu6_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Relu_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Reshape_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ResizeBilinear_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ResizeNearestNeighbor_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Reverse_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Round_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Rsqrt_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Select_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Selu_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sigmoid_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sign_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sin_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sinh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Slice_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Softmax_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Softplus_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/SpaceToBatchND_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/SplitV_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sqrt_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Square_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/SquaredDifference_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Step_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sub_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Sum_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Tan_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Tanh_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Tile_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Transpose_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/Unpack_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/UnsortedSegmentSum_grad.ts", "../node_modules/.pnpm/tfjs-core/src/gradients/ZerosLike_grad.ts", "../node_modules/.pnpm/tfjs-core/src/register_all_gradients.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/abs.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/acos.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/acosh.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/add.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/all.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/any.ts", 
"../node_modules/.pnpm/tfjs-core/src/public/chained_ops/arg_max.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/arg_min.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as_scalar.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as_type.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as1d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as2d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as3d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as4d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/as5d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/asin.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/asinh.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/atan.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/atan2.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/atanh.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/avg_pool.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/batch_to_space_nd.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/batchnorm.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/broadcast_to.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/cast.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/ceil.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/clip_by_value.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/concat.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/conv1d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/conv2d_transpose.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/cos.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/cosh.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/cumsum.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/depth_to_space.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/depthwise_conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/dilation2d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/div_no_nan.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/div.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/dot.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/elu.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/equal.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/erf.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/exp.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/expand_dims.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/expm1.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/fft.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/flatten.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/floor.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/floorDiv.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/gather.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/greater_equal.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/greater.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/ifft.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/irfft.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/is_finite.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/is_inf.ts", 
"../node_modules/.pnpm/tfjs-core/src/public/chained_ops/is_nan.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/leaky_relu.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/less_equal.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/less.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/local_response_normalization.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/log_sigmoid.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/log_softmax.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/log_sum_exp.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/log.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/log1p.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/logical_and.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/logical_not.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/logical_or.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/logical_xor.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/mat_mul.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/max_pool.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/max.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/maximum.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/mean.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/min.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/minimum.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/mirror_pad.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/mod.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/mul.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/neg.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/norm.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/not_equal.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/one_hot.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/ones_like.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/pad.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/pool.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/pow.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/prelu.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/prod.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/reciprocal.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/relu.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/relu6.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/reshape_as.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/reshape.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/resize_bilinear.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/resize_nearest_neighbor.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/reverse.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/rfft.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/round.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/rsqrt.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/selu.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/separable_conv2d.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sigmoid.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sign.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sin.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sinh.ts", 
"../node_modules/.pnpm/tfjs-core/src/public/chained_ops/slice.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/softmax.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/softplus.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/space_to_batch_nd.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/split.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sqrt.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/square.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/squared_difference.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/squeeze.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/stack.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/step.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/strided_slice.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sub.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/sum.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/tan.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/tanh.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/tile.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/to_bool.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/to_float.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/to_int.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/topk.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/transpose.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/unique.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/unsorted_segment_sum.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/unstack.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/where.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/zeros_like.ts", "../node_modules/.pnpm/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_constraints.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/backend/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/errors.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/generic_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/constraints.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_initializers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/keras_format/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/math_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/backend/tfjs_backend.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/keras_format/initializer_config.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/initializers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_layers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/backend/state.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/types_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/variable_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/variables.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/topology.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/input_layer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/logs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/base_callbacks.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/serialization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/losses.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/metrics.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/optimizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/user_defined_metadata.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/layer_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/serialization_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/container.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_tensors.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/engine/training.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/models.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/activations.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/regularizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/advanced_activations.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/utils/conv_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/recurrent.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional_recurrent.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/core.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/embeddings.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/merge.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/noise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/padding.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/pooling.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/layers/wrappers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_metrics.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_models.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/exports_regularizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/callbacks.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-layers/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/data/compiled_api.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/custom_op/register.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/arithmetic.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/basic_math.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/control.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/convolution.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/creation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/dynamic.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/evaluation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/graph.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/hash_table.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/image.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/logical.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/matrices.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/reduction.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/slice_join.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/sparse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/spectral.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/string.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/transformation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/operation_mapper.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/custom_op/node_value_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/arithmetic_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/basic_math_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_array.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_list.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/control_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/convolution_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/creation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/dynamic_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/evaluation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/graph_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/hash_table.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/hash_table_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/image_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/logical_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/matrices_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/normalization_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/reduction_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/slice_join_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/sparse_executor.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/spectral_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/string_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/transformation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/operations/operation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/execution_context.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/model_analysis.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/graph_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/resource_manager.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/executor/graph_model.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-converter/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/lazy_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/util/deep_map.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/util/deep_clone.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/util/ring_buffer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/util/growing_ring_buffer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/datasets/text_line_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/datasets/csv_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/microphone_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/webcam_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/datasource.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/string_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/byte_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/file_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/iterators/url_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/util/source_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/sources/file_data_source.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/sources/url_data_source.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/readers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.9.0_470c05e138890ee649df8440b2dfddf4/node_modules/@tensorflow/tfjs-data/src/version.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/cpu_util.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/backend_cpu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/shared.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Abs.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/binary_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Complex.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/zeros_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Identity.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Real.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Cast.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/binary_utils.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Add.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Bincount_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/unary_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/unary_utils.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Ceil.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Concat_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Equal.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Exp.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Expm1.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Floor.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/GatherNd_Impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/GatherV2_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Greater.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Less.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LessEqual.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LinSpace_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Log.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Max_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Maximum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Minimum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Multiply.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Neg.ts", 
"../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/NotEqual.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Transpose_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Transpose.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Prod.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Range_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Slice.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseFillEmptyRows_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseReshape_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseSegmentReduction_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sqrt.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StridedSlice_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringNGrams_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringSplit_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringToHashBucketFast_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sub.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Tile_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/TopK_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Unique_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/base.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Elu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Prelu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Relu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Relu6.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/fused_utils.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Reshape.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Acos.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Acosh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/AddN.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/All.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Any.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ArgMax.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ArgMin.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Asin.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Asinh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Atan.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Atan2.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Atanh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/pool_utils.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/AvgPool.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/AvgPool3D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/AvgPool3DGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/AvgPoolGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/BatchNorm.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/BatchToSpaceND.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Bincount.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/BroadcastArgs.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Clip.ts", 
"../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ComplexAbs.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Imag.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Concat.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv2D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv2DBackpropFilter.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv2DBackpropInput.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv3D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv3DBackpropFilterV2.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Conv3DBackpropInputV2.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Cos.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Cosh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Cumsum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/DenseBincount.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/DepthToSpace.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNative.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNativeBackpropFilter.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNativeBackpropInput.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Diag.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Dilation2D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Dilation2DBackpropFilter.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Dilation2DBackpropInput.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Einsum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/EluGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Erf.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/RealDiv.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/utils/fft_utils.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/FFT.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Fill.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/FlipLeftRight.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/FusedConv2D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/GatherNd.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/GatherV2.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/IFFT.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/IsFinite.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/IsInf.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/IsNaN.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LinSpace.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Log1p.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LogicalNot.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LogicalOr.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LRN.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/LRNGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Max.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPool.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPool3D.ts", 
"../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPool3DGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPoolGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPoolWithArgmax_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MaxPoolWithArgmax.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Mean.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Min.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Mod.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Multinomial.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Softmax.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV5.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/OneHot.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/OnesLike.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Pack.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/PadV2.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Pow.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Range.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Reciprocal.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ResizeBilinearGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ResizeNearestNeighbor.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ResizeNearestNeighborGrad.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Reverse.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Round.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Scatter_impl.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Select.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Selu.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sign.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sin.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Sinh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Softplus.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SpaceToBatchND.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseFillEmptyRows.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseReshape.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseSegmentMean.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseSegmentSum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SparseToDense.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/SplitV.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Square.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Step.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StridedSlice.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringNGrams.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringSplit.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/StringToHashBucketFast.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Tan.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Tanh.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Tile.ts", 
"../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/TopK.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Transform.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Unique.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/Unpack.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/kernels/UnsortedSegmentSum.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/register_all_kernels.ts", "../node_modules/.pnpm/tfjs-backend-cpu/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/webgl_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/canvas_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/tex_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/flags_webgl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/glsl_version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/shader_compiler_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/shader_compiler.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_math.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/decode_matrix_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/decode_matrix_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_float_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_float_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_matrix_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_matrix_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_context.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/packing_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pack_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reshape_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/texture_manager.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unaryop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unaryop_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unpack_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/backend_webgl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/webgl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Identity.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Complex.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/kernel_funcs_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mulmat_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_complex_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Multiply.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mean_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reduce_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/reduce.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transpose_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transpose_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transpose_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sum_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchMatMul_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Acos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Acosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/addn_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/addn_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AddN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/All.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/argminmax_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/argminmax_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/arg_min_max.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ArgMax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ArgMin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Asin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Asinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atan2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pool_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/avg_pool_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool3DGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/batchnorm_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/batchnorm_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchNorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/slice_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/slice_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchToSpaceND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Bincount.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NotEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Real.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/int.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/clip_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/clip_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ClipByValue.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/complex_abs_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ComplexAbs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/concat_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/concat_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Imag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Concat_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/im2col_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2D_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2DBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2DBackpropInput.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3DBackpropFilterV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3DBackpropInputV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/crop_and_resize_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/cumsum_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DenseBincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/depth_to_space_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthToSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_packed_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNative.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_backprop_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNativeBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNativeBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/diag_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Diag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/dilation_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Dilation2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Einsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/EluGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Erf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Expm1.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/fft_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FFT_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/fill_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Fill.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/flip_left_right_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FlipLeftRight.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels_utils/from_pixels_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels_utils/from_pixels_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FusedConv2D.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gather_nd_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GatherNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gather_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GatherV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IFFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsFinite.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsInf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsNaN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LessEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LinSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Log1p.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalNot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalOr.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LRN.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_grad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LRNGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Max_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/max_pool_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool3DGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolWithArgmax_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolWithArgmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mean_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Minimum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mirror_pad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mirror_pad_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/multinomial_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/RealDiv.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Multinomial.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV5.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/onehot_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/OneHot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/OnesLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Pack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pad_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/PadV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reciprocal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeBilinearGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeNearestNeighbor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeNearestNeighborGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reverse_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reverse_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/rotate_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/scatter_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/select_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Select.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Selu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sign.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Softplus.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SpaceToBatchND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseFillEmptyRows.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseReshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseSegmentMean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseSegmentSum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseToDense.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SplitV.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/strided_slice_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/StridedSlice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/StringNGrams.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/StringSplit.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/StringToHashBucketFast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tan.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/tile_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/top_k_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/TopK.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transform_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Unique.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Unpack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/segment_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/UnsortedSegmentSum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/register_all_kernels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-webgl/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs@3.9.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs@3.9.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/types.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/unary_kernel.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/binary_kernel.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/AddN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Identity.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/kernel_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/All.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ArgMax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/AvgPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernel_utils/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/BatchToSpaceND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ClipByValue.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Conv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Conv2DBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/DepthToSpace.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/DepthwiseConv2dNative.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Fill.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FlipLeftRight.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedBatchNorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GatherNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GatherV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LessEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Max.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/MaxPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Minimum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Multiply.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppression_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV5.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NotEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/OneHot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/OnesLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Pack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/PadV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/RealDiv.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Select.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/SpaceToBatchND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/SplitV.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/StridedSlice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tan.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/TopK.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Unpack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/register_all_kernels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/flags_wasm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/backend_wasm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm-threaded-simd.worker.js", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.9.0_@tensorflow+tfjs-core@3.9.0/node_modules/@tensorflow/tfjs-backend-wasm/src/index.ts", "tfjs.version.js", "../src/blazeface/box.ts", "../src/blazeface/util.ts", "../src/blazeface/blazeface.ts", "../src/blazeface/coords.ts", "../src/image/imagefx.ts", "../src/image/image.ts", "../src/env.ts", "../src/blazeface/facepipeline.ts", "../src/blazeface/facemesh.ts", "../src/faceres/faceres.ts", "../src/emotion/emotion.ts", "../src/posenet/keypoints.ts", "../src/posenet/utils.ts", "../src/posenet/poses.ts", "../src/posenet/posenet.ts", "../src/handpose/box.ts", "../src/handpose/anchors.ts", "../src/handpose/handdetector.ts", "../src/handpose/util.ts", "../src/handpose/handpipeline.ts", "../src/fingerpose/description.ts", "../src/fingerpose/estimator.ts", "../src/fingerpose/gesture.ts", "../src/fingerpose/gestures.ts", "../src/fingerpose/fingerpose.ts", "../src/handpose/handpose.ts", "../src/blazepose/annotations.ts", "../src/blazepose/blazepose.ts", "../src/efficientpose/efficientpose.ts", "../src/movenet/movenet.ts", "../src/object/labels.ts", "../src/object/nanodet.ts", "../src/object/centernet.ts", "../src/segmentation/segmentation.ts", "../src/models.ts", "../src/face.ts", "../src/gesture/gesture.ts", "../src/draw.ts", "../src/persons.ts", "../src/interpolate.ts", "../src/tfjs/humangl.ts", "../src/tfjs/backend.ts", "../src/sample.ts", "../src/warmup.ts", "../src/human.ts"], "sourcesContent": ["/**\n * Simple helper functions used accross codebase\n */\n\n// helper function: join two paths\nexport function join(folder: string, file: string): string {\n const separator = folder.endsWith('/') ? 
'' : '/';\n const skipJoin = file.startsWith('.') || file.startsWith('/') || file.startsWith('http:') || file.startsWith('https:') || file.startsWith('file:');\n const path = skipJoin ? `${file}` : `${folder}${separator}${file}`;\n if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`modelpath error: ${path} expecting json file`);\n return path;\n}\n\n// helper function: wrapper around console output\nexport function log(...msg): void {\n const dt = new Date();\n const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;\n // eslint-disable-next-line no-console\n if (msg) console.log(ts, 'Human:', ...msg);\n}\n\n// helper function: gets elapsed time on both browser and nodejs\nexport const now = () => {\n if (typeof performance !== 'undefined') return performance.now();\n return parseInt((Number(process.hrtime.bigint()) / 1000 / 1000).toString());\n};\n\n// helper function: perform deep merge of multiple objects so it allows full inheriance with overrides\nexport function mergeDeep(...objects) {\n const isObject = (obj) => obj && typeof obj === 'object';\n return objects.reduce((prev, obj) => {\n Object.keys(obj || {}).forEach((key) => {\n const pVal = prev[key];\n const oVal = obj[key];\n if (Array.isArray(pVal) && Array.isArray(oVal)) prev[key] = pVal.concat(...oVal);\n else if (isObject(pVal) && isObject(oVal)) prev[key] = mergeDeep(pVal, oVal);\n else prev[key] = oVal;\n });\n return prev;\n }, {});\n}\n\n// helper function: return min and max from input array\nexport const minmax = (data: Array) => data.reduce((acc: Array, val) => {\n acc[0] = (acc[0] === undefined || val < acc[0]) ? val : acc[0];\n acc[1] = (acc[1] === undefined || val > acc[1]) ? 
val : acc[1];\n return acc;\n}, []);\n", "/* eslint-disable indent */\n/* eslint-disable no-multi-spaces */\n\nexport interface FaceDetectorConfig {\n modelPath: string,\n rotation: boolean,\n maxDetected: number,\n skipFrames: number,\n minConfidence: number,\n iouThreshold: number,\n return: boolean,\n}\n\nexport interface FaceMeshConfig {\n enabled: boolean,\n modelPath: string,\n}\n\nexport interface FaceIrisConfig {\n enabled: boolean,\n modelPath: string,\n}\n\nexport interface FaceDescriptionConfig {\n enabled: boolean,\n modelPath: string,\n skipFrames: number,\n minConfidence: number,\n}\n\nexport interface FaceEmotionConfig {\n enabled: boolean,\n minConfidence: number,\n skipFrames: number,\n modelPath: string,\n}\n\n/** Controlls and configures all face-specific options:\n * - face detection, face mesh detection, age, gender, emotion detection and face description\n * Parameters:\n * - enabled: true/false\n * - modelPath: path for each of face models\n * - minConfidence: threshold for discarding a prediction\n * - iouThreshold: ammount of overlap between two detected objects before one object is removed\n * - maxDetected: maximum number of faces detected in the input, should be set to the minimum number for performance\n * - rotation: use calculated rotated face image or just box with rotation as-is, false means higher performance, but incorrect mesh mapping on higher face angles\n * - return: return extracted face as tensor for futher user processing, in which case user is reponsible for manually disposing the tensor\n*/\nexport interface FaceConfig {\n enabled: boolean,\n detector: Partial,\n mesh: Partial,\n iris: Partial,\n description: Partial,\n emotion: Partial,\n}\n\n/** Controlls and configures all body detection specific options\n * - enabled: true/false\n * - modelPath: body pose model, can be absolute path or relative to modelBasePath\n * - minConfidence: threshold for discarding a prediction\n * - maxDetected: maximum number of people detected in the input, should be set to the minimum number for performance\n*/\nexport interface BodyConfig {\n enabled: boolean,\n modelPath: string,\n maxDetected: number,\n minConfidence: number,\n skipFrames: number,\n}\n\n/** Controlls and configures all hand detection specific options\n * - enabled: true/false\n * - landmarks: detect hand landmarks or just hand boundary box\n * - modelPath: paths for hand detector and hand skeleton models, can be absolute path or relative to modelBasePath\n * - minConfidence: threshold for discarding a prediction\n * - iouThreshold: ammount of overlap between two detected objects before one object is removed\n * - maxDetected: maximum number of hands detected in the input, should be set to the minimum number for performance\n * - rotation: use best-guess rotated hand image or just box with rotation as-is, false means higher performance, but incorrect finger mapping if hand is inverted\n*/\nexport interface HandConfig {\n enabled: boolean,\n rotation: boolean,\n skipFrames: number,\n minConfidence: number,\n iouThreshold: number,\n maxDetected: number,\n landmarks: boolean,\n detector: {\n modelPath?: string,\n },\n skeleton: {\n modelPath?: string,\n },\n}\n\n/** Controlls and configures all object detection specific options\n * - enabled: true/false\n * - modelPath: object detection model, can be absolute path or relative to modelBasePath\n * - minConfidence: minimum score that detection must have to return as valid object\n * - iouThreshold: ammount of overlap between two detected objects 
before one object is removed\n * - maxDetected: maximum number of detections to return\n*/\nexport interface ObjectConfig {\n enabled: boolean,\n modelPath: string,\n minConfidence: number,\n iouThreshold: number,\n maxDetected: number,\n skipFrames: number,\n}\n\n/** Controlls and configures all body segmentation module\n * removes background from input containing person\n * if segmentation is enabled it will run as preprocessing task before any other model\n * alternatively leave it disabled and use it on-demand using human.segmentation method which can\n * remove background or replace it with user-provided background\n *\n * - enabled: true/false\n * - modelPath: object detection model, can be absolute path or relative to modelBasePath\n*/\nexport interface SegmentationConfig {\n enabled: boolean,\n modelPath: string,\n}\n\n/** Run input through image filters before inference\n * - image filters run with near-zero latency as they are executed on the GPU\n*/\nexport interface FilterConfig {\n enabled: boolean,\n /** Resize input width\n * - if both width and height are set to 0, there is no resizing\n * - if just one is set, second one is scaled automatically\n * - if both are set, values are used as-is\n */\n width: number,\n /** Resize input height\n * - if both width and height are set to 0, there is no resizing\n * - if just one is set, second one is scaled automatically\n * - if both are set, values are used as-is\n */\n height: number,\n /** Return processed canvas imagedata in result */\n return: boolean,\n /** Flip input as mirror image */\n flip: boolean,\n /** Range: -1 (darken) to 1 (lighten) */\n brightness: number,\n /** Range: -1 (reduce contrast) to 1 (increase contrast) */\n contrast: number,\n /** Range: 0 (no sharpening) to 1 (maximum sharpening) */\n sharpness: number,\n /** Range: 0 (no blur) to N (blur radius in pixels) */\n blur: number\n /** Range: -1 (reduce saturation) to 1 (increase saturation) */\n saturation: number,\n /** Range: 0 (no change) to 360 (hue rotation in degrees) */\n hue: number,\n /** Image negative */\n negative: boolean,\n /** Image sepia colors */\n sepia: boolean,\n /** Image vintage colors */\n vintage: boolean,\n /** Image kodachrome colors */\n kodachrome: boolean,\n /** Image technicolor colors */\n technicolor: boolean,\n /** Image polaroid camera effect */\n polaroid: boolean,\n /** Range: 0 (no pixelate) to N (number of pixels to pixelate) */\n pixelate: number,\n}\n\n/** Controlls gesture detection */\nexport interface GestureConfig {\n enabled: boolean,\n}\n\n/**\n * Configuration interface definition for **Human** library\n *\n * Contains all configurable parameters\n * @typedef Config\n */\nexport interface Config {\n /** Backend used for TFJS operations */\n backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',\n // backend: string;\n\n /** Path to *.wasm files if backend is set to `wasm` */\n wasmPath: string,\n\n /** Print debug statements to console */\n debug: boolean,\n\n /** Perform model loading and inference concurrently or sequentially */\n async: boolean,\n\n /** What to use for `human.warmup()`\n * - warmup pre-initializes all models for faster inference but can take significant time on startup\n * - only used for `webgl` and `humangl` backends\n */\n warmup: 'none' | 'face' | 'full' | 'body',\n // warmup: string;\n\n /** Base model path (typically starting with file://, http:// or https://) for all models\n * - individual modelPath values are relative to this path\n */\n modelBasePath: 
string,\n\n /** Cache sensitivity\n * - values 0..1 where 0.01 means reset cache if input changed more than 1%\n * - set to 0 to disable caching\n */\n cacheSensitivity: number;\n\n /** Cache sensitivity\n * - values 0..1 where 0.01 means reset cache if input changed more than 1%\n * - set to 0 to disable caching\n */\n skipFrame: boolean;\n\n /** Run input through image filters before inference\n * - image filters run with near-zero latency as they are executed on the GPU\n */\n filter: Partial,\n // type definition end\n\n gesture: Partial;\n\n face: Partial,\n\n body: Partial,\n\n hand: Partial,\n\n object: Partial,\n\n segmentation: Partial,\n}\n\n/**\n * [See all default Config values...](https://github.com/vladmandic/human/blob/main/src/config.ts#L244)\n *\n */\nconst config: Config = {\n backend: '', // select tfjs backend to use, leave empty to use default backend\n // can be 'webgl', 'wasm', 'cpu', or 'humangl' which is a custom version of webgl\n // default set to `humangl` for browsers and `tensorflow` for nodejs\n modelBasePath: '', // base path for all models\n // default set to `../models/` for browsers and `file://models/` for nodejs\n wasmPath: '', // path for wasm binaries, only used for backend: wasm\n // default set to download from jsdeliv during Human class instantiation\n debug: true, // print additional status messages to console\n async: true, // execute enabled models in parallel\n warmup: 'full', // what to use for human.warmup(), can be 'none', 'face', 'full'\n // warmup pre-initializes all models for faster inference but can take\n // significant time on startup\n // only used for `webgl` and `humangl` backends\n cacheSensitivity: 0.75, // cache sensitivity\n // values 0..1 where 0.01 means reset cache if input changed more than 1%\n // set to 0 to disable caching\n skipFrame: false, // internal & dynamic\n filter: { // run input through image filters before inference\n // image filters run with near-zero latency as they are executed on the GPU\n enabled: true, // enable image pre-processing filters\n width: 0, // resize input width\n height: 0, // resize input height\n // if both width and height are set to 0, there is no resizing\n // if just one is set, second one is scaled automatically\n // if both are set, values are used as-is\n flip: false, // flip input as mirror image\n return: true, // return processed canvas imagedata in result\n brightness: 0, // range: -1 (darken) to 1 (lighten)\n contrast: 0, // range: -1 (reduce contrast) to 1 (increase contrast)\n sharpness: 0, // range: 0 (no sharpening) to 1 (maximum sharpening)\n blur: 0, // range: 0 (no blur) to N (blur radius in pixels)\n saturation: 0, // range: -1 (reduce saturation) to 1 (increase saturation)\n hue: 0, // range: 0 (no change) to 360 (hue rotation in degrees)\n negative: false, // image negative\n sepia: false, // image sepia colors\n vintage: false, // image vintage colors\n kodachrome: false, // image kodachrome colors\n technicolor: false, // image technicolor colors\n polaroid: false, // image polaroid camera effect\n pixelate: 0, // range: 0 (no pixelate) to N (number of pixels to pixelate)\n },\n\n gesture: {\n enabled: true, // enable gesture recognition based on model results\n },\n\n face: {\n enabled: true, // controls if specified modul is enabled\n // face.enabled is required for all face models:\n // detector, mesh, iris, age, gender, emotion\n // (note: module is not loaded until it is required)\n detector: {\n modelPath: 'blazeface.json', // detector model, can be absolute 
path or relative to modelBasePath\n rotation: true, // use best-guess rotated face image or just box with rotation as-is\n // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees\n // this parameter is not valid in nodejs\n maxDetected: 15, // maximum number of faces detected in the input\n // should be set to the minimum number for performance\n skipFrames: 15, // how many max frames to go without re-running the face bounding box detector\n // only used when cacheSensitivity is not zero\n // e.g., if model is running st 25 FPS, we can re-use existing bounding\n // box for updated face analysis as the head probably hasn't moved much\n // in short time (10 * 1/25 = 0.25 sec)\n minConfidence: 0.2, // threshold for discarding a prediction\n iouThreshold: 0.1, // ammount of overlap between two detected objects before one object is removed\n return: false, // return extracted face as tensor\n // in which case user is reponsible for disposing the tensor\n },\n\n mesh: {\n enabled: true,\n modelPath: 'facemesh.json', // facemesh model, can be absolute path or relative to modelBasePath\n },\n\n iris: {\n enabled: true,\n modelPath: 'iris.json', // face iris model\n // can be either absolute path or relative to modelBasePath\n },\n\n description: {\n enabled: true, // to improve accuracy of face description extraction it is\n // recommended to enable detector.rotation and mesh.enabled\n modelPath: 'faceres.json', // face description model\n // can be either absolute path or relative to modelBasePath\n skipFrames: 11, // how many max frames to go without re-running the detector\n // only used when cacheSensitivity is not zero\n minConfidence: 0.1, // threshold for discarding a prediction\n },\n\n emotion: {\n enabled: true,\n minConfidence: 0.1, // threshold for discarding a prediction\n skipFrames: 17, // how max many frames to go without re-running the detector\n // only used when cacheSensitivity is not zero\n modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath\n },\n },\n\n body: {\n enabled: true,\n modelPath: 'movenet-lightning.json', // body model, can be absolute path or relative to modelBasePath\n // can be 'posenet', 'blazepose', 'efficientpose', 'movenet-lightning', 'movenet-thunder'\n maxDetected: 1, // maximum number of people detected in the input\n // should be set to the minimum number for performance\n // only valid for posenet as other models detects single pose\n minConfidence: 0.2, // threshold for discarding a prediction\n skipFrames: 1, // how many max frames to go without re-running the detector\n // only used when cacheSensitivity is not zero\n},\n\n hand: {\n enabled: true,\n rotation: true, // use best-guess rotated hand image or just box with rotation as-is\n // false means higher performance, but incorrect finger mapping if hand is inverted\n skipFrames: 18, // how many max frames to go without re-running the hand bounding box detector\n // only used when cacheSensitivity is not zero\n // e.g., if model is running st 25 FPS, we can re-use existing bounding\n // box for updated hand skeleton analysis as the hand probably\n // hasn't moved much in short time (10 * 1/25 = 0.25 sec)\n minConfidence: 0.8, // threshold for discarding a prediction\n iouThreshold: 0.2, // ammount of overlap between two detected objects before one object is removed\n maxDetected: 1, // maximum number of hands detected in the input\n // should be set to the minimum number for performance\n landmarks: true, // 
detect hand landmarks or just hand boundary box\n detector: {\n modelPath: 'handdetect.json', // hand detector model, can be absolute path or relative to modelBasePath\n },\n skeleton: {\n modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath\n },\n },\n\n object: {\n enabled: false,\n modelPath: 'mb3-centernet.json', // experimental: object detection model, can be absolute path or relative to modelBasePath\n // can be 'mb3-centernet' or 'nanodet'\n minConfidence: 0.2, // threshold for discarding a prediction\n iouThreshold: 0.4, // ammount of overlap between two detected objects before one object is removed\n maxDetected: 10, // maximum number of objects detected in the input\n skipFrames: 19, // how many max frames to go without re-running the detector\n // only used when cacheSensitivity is not zero\n },\n\n segmentation: {\n enabled: false, // controlls and configures all body segmentation module\n // removes background from input containing person\n // if segmentation is enabled it will run as preprocessing task before any other model\n // alternatively leave it disabled and use it on-demand using human.segmentation method which can\n // remove background or replace it with user-provided background\n modelPath: 'selfie.json', // experimental: object detection model, can be absolute path or relative to modelBasePath\n // can be 'selfie' or 'meet'\n },\n};\nexport { config as defaults };\n", "module.exports = Long;\r\n\r\n/**\r\n * wasm optimizations, to do native i64 multiplication and divide\r\n */\r\nvar wasm = null;\r\n\r\ntry {\r\n wasm = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([\r\n 0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11\r\n ])), {}).exports;\r\n} catch (e) {\r\n // no wasm support :(\r\n}\r\n\r\n/**\r\n * Constructs a 64 bit two's-complement integer, given its low and high 32 bit values as *signed* integers.\r\n * See the from* functions below for more convenient ways of constructing Longs.\r\n * @exports Long\r\n * @class A Long class for representing a 64 bit two's-complement integer value.\r\n * @param {number} low The low (signed) 32 bits of the long\r\n * @param {number} high The high (signed) 32 bits of the long\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @constructor\r\n */\r\nfunction Long(low, high, unsigned) {\r\n\r\n /**\r\n * The low 32 bits as a signed value.\r\n * 
@type {number}\r\n */\r\n this.low = low | 0;\r\n\r\n /**\r\n * The high 32 bits as a signed value.\r\n * @type {number}\r\n */\r\n this.high = high | 0;\r\n\r\n /**\r\n * Whether unsigned or not.\r\n * @type {boolean}\r\n */\r\n this.unsigned = !!unsigned;\r\n}\r\n\r\n// The internal representation of a long is the two given signed, 32-bit values.\r\n// We use 32-bit pieces because these are the size of integers on which\r\n// Javascript performs bit-operations. For operations like addition and\r\n// multiplication, we split each number into 16 bit pieces, which can easily be\r\n// multiplied within Javascript's floating-point representation without overflow\r\n// or change in sign.\r\n//\r\n// In the algorithms below, we frequently reduce the negative case to the\r\n// positive case by negating the input(s) and then post-processing the result.\r\n// Note that we must ALWAYS check specially whether those values are MIN_VALUE\r\n// (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as\r\n// a positive number, it overflows back into a negative). Not handling this\r\n// case would often result in infinite recursion.\r\n//\r\n// Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the from*\r\n// methods on which they depend.\r\n\r\n/**\r\n * An indicator used to reliably determine if an object is a Long or not.\r\n * @type {boolean}\r\n * @const\r\n * @private\r\n */\r\nLong.prototype.__isLong__;\r\n\r\nObject.defineProperty(Long.prototype, \"__isLong__\", { value: true });\r\n\r\n/**\r\n * @function\r\n * @param {*} obj Object\r\n * @returns {boolean}\r\n * @inner\r\n */\r\nfunction isLong(obj) {\r\n return (obj && obj[\"__isLong__\"]) === true;\r\n}\r\n\r\n/**\r\n * Tests if the specified object is a Long.\r\n * @function\r\n * @param {*} obj Object\r\n * @returns {boolean}\r\n */\r\nLong.isLong = isLong;\r\n\r\n/**\r\n * A cache of the Long representations of small integer values.\r\n * @type {!Object}\r\n * @inner\r\n */\r\nvar INT_CACHE = {};\r\n\r\n/**\r\n * A cache of the Long representations of small unsigned integer values.\r\n * @type {!Object}\r\n * @inner\r\n */\r\nvar UINT_CACHE = {};\r\n\r\n/**\r\n * @param {number} value\r\n * @param {boolean=} unsigned\r\n * @returns {!Long}\r\n * @inner\r\n */\r\nfunction fromInt(value, unsigned) {\r\n var obj, cachedObj, cache;\r\n if (unsigned) {\r\n value >>>= 0;\r\n if (cache = (0 <= value && value < 256)) {\r\n cachedObj = UINT_CACHE[value];\r\n if (cachedObj)\r\n return cachedObj;\r\n }\r\n obj = fromBits(value, (value | 0) < 0 ? -1 : 0, true);\r\n if (cache)\r\n UINT_CACHE[value] = obj;\r\n return obj;\r\n } else {\r\n value |= 0;\r\n if (cache = (-128 <= value && value < 128)) {\r\n cachedObj = INT_CACHE[value];\r\n if (cachedObj)\r\n return cachedObj;\r\n }\r\n obj = fromBits(value, value < 0 ? -1 : 0, false);\r\n if (cache)\r\n INT_CACHE[value] = obj;\r\n return obj;\r\n }\r\n}\r\n\r\n/**\r\n * Returns a Long representing the given 32 bit integer value.\r\n * @function\r\n * @param {number} value The 32 bit integer in question\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {!Long} The corresponding Long value\r\n */\r\nLong.fromInt = fromInt;\r\n\r\n/**\r\n * @param {number} value\r\n * @param {boolean=} unsigned\r\n * @returns {!Long}\r\n * @inner\r\n */\r\nfunction fromNumber(value, unsigned) {\r\n if (isNaN(value))\r\n return unsigned ? 
UZERO : ZERO;\r\n if (unsigned) {\r\n if (value < 0)\r\n return UZERO;\r\n if (value >= TWO_PWR_64_DBL)\r\n return MAX_UNSIGNED_VALUE;\r\n } else {\r\n if (value <= -TWO_PWR_63_DBL)\r\n return MIN_VALUE;\r\n if (value + 1 >= TWO_PWR_63_DBL)\r\n return MAX_VALUE;\r\n }\r\n if (value < 0)\r\n return fromNumber(-value, unsigned).neg();\r\n return fromBits((value % TWO_PWR_32_DBL) | 0, (value / TWO_PWR_32_DBL) | 0, unsigned);\r\n}\r\n\r\n/**\r\n * Returns a Long representing the given value, provided that it is a finite number. Otherwise, zero is returned.\r\n * @function\r\n * @param {number} value The number in question\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {!Long} The corresponding Long value\r\n */\r\nLong.fromNumber = fromNumber;\r\n\r\n/**\r\n * @param {number} lowBits\r\n * @param {number} highBits\r\n * @param {boolean=} unsigned\r\n * @returns {!Long}\r\n * @inner\r\n */\r\nfunction fromBits(lowBits, highBits, unsigned) {\r\n return new Long(lowBits, highBits, unsigned);\r\n}\r\n\r\n/**\r\n * Returns a Long representing the 64 bit integer that comes by concatenating the given low and high bits. Each is\r\n * assumed to use 32 bits.\r\n * @function\r\n * @param {number} lowBits The low 32 bits\r\n * @param {number} highBits The high 32 bits\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {!Long} The corresponding Long value\r\n */\r\nLong.fromBits = fromBits;\r\n\r\n/**\r\n * @function\r\n * @param {number} base\r\n * @param {number} exponent\r\n * @returns {number}\r\n * @inner\r\n */\r\nvar pow_dbl = Math.pow; // Used 4 times (4*8 to 15+4)\r\n\r\n/**\r\n * @param {string} str\r\n * @param {(boolean|number)=} unsigned\r\n * @param {number=} radix\r\n * @returns {!Long}\r\n * @inner\r\n */\r\nfunction fromString(str, unsigned, radix) {\r\n if (str.length === 0)\r\n throw Error('empty string');\r\n if (str === \"NaN\" || str === \"Infinity\" || str === \"+Infinity\" || str === \"-Infinity\")\r\n return ZERO;\r\n if (typeof unsigned === 'number') {\r\n // For goog.math.long compatibility\r\n radix = unsigned,\r\n unsigned = false;\r\n } else {\r\n unsigned = !! 
unsigned;\r\n }\r\n radix = radix || 10;\r\n if (radix < 2 || 36 < radix)\r\n throw RangeError('radix');\r\n\r\n var p;\r\n if ((p = str.indexOf('-')) > 0)\r\n throw Error('interior hyphen');\r\n else if (p === 0) {\r\n return fromString(str.substring(1), unsigned, radix).neg();\r\n }\r\n\r\n // Do several (8) digits each time through the loop, so as to\r\n // minimize the calls to the very expensive emulated div.\r\n var radixToPower = fromNumber(pow_dbl(radix, 8));\r\n\r\n var result = ZERO;\r\n for (var i = 0; i < str.length; i += 8) {\r\n var size = Math.min(8, str.length - i),\r\n value = parseInt(str.substring(i, i + size), radix);\r\n if (size < 8) {\r\n var power = fromNumber(pow_dbl(radix, size));\r\n result = result.mul(power).add(fromNumber(value));\r\n } else {\r\n result = result.mul(radixToPower);\r\n result = result.add(fromNumber(value));\r\n }\r\n }\r\n result.unsigned = unsigned;\r\n return result;\r\n}\r\n\r\n/**\r\n * Returns a Long representation of the given string, written using the specified radix.\r\n * @function\r\n * @param {string} str The textual representation of the Long\r\n * @param {(boolean|number)=} unsigned Whether unsigned or not, defaults to signed\r\n * @param {number=} radix The radix in which the text is written (2-36), defaults to 10\r\n * @returns {!Long} The corresponding Long value\r\n */\r\nLong.fromString = fromString;\r\n\r\n/**\r\n * @function\r\n * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val\r\n * @param {boolean=} unsigned\r\n * @returns {!Long}\r\n * @inner\r\n */\r\nfunction fromValue(val, unsigned) {\r\n if (typeof val === 'number')\r\n return fromNumber(val, unsigned);\r\n if (typeof val === 'string')\r\n return fromString(val, unsigned);\r\n // Throws for non-objects, converts non-instanceof Long:\r\n return fromBits(val.low, val.high, typeof unsigned === 'boolean' ? 
unsigned : val.unsigned);\r\n}\r\n\r\n/**\r\n * Converts the specified value to a Long using the appropriate from* function for its type.\r\n * @function\r\n * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val Value\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {!Long}\r\n */\r\nLong.fromValue = fromValue;\r\n\r\n// NOTE: the compiler should inline these constant values below and then remove these variables, so there should be\r\n// no runtime penalty for these.\r\n\r\n/**\r\n * @type {number}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_16_DBL = 1 << 16;\r\n\r\n/**\r\n * @type {number}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_24_DBL = 1 << 24;\r\n\r\n/**\r\n * @type {number}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL;\r\n\r\n/**\r\n * @type {number}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL;\r\n\r\n/**\r\n * @type {number}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_63_DBL = TWO_PWR_64_DBL / 2;\r\n\r\n/**\r\n * @type {!Long}\r\n * @const\r\n * @inner\r\n */\r\nvar TWO_PWR_24 = fromInt(TWO_PWR_24_DBL);\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar ZERO = fromInt(0);\r\n\r\n/**\r\n * Signed zero.\r\n * @type {!Long}\r\n */\r\nLong.ZERO = ZERO;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar UZERO = fromInt(0, true);\r\n\r\n/**\r\n * Unsigned zero.\r\n * @type {!Long}\r\n */\r\nLong.UZERO = UZERO;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar ONE = fromInt(1);\r\n\r\n/**\r\n * Signed one.\r\n * @type {!Long}\r\n */\r\nLong.ONE = ONE;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar UONE = fromInt(1, true);\r\n\r\n/**\r\n * Unsigned one.\r\n * @type {!Long}\r\n */\r\nLong.UONE = UONE;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar NEG_ONE = fromInt(-1);\r\n\r\n/**\r\n * Signed negative one.\r\n * @type {!Long}\r\n */\r\nLong.NEG_ONE = NEG_ONE;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar MAX_VALUE = fromBits(0xFFFFFFFF|0, 0x7FFFFFFF|0, false);\r\n\r\n/**\r\n * Maximum signed value.\r\n * @type {!Long}\r\n */\r\nLong.MAX_VALUE = MAX_VALUE;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar MAX_UNSIGNED_VALUE = fromBits(0xFFFFFFFF|0, 0xFFFFFFFF|0, true);\r\n\r\n/**\r\n * Maximum unsigned value.\r\n * @type {!Long}\r\n */\r\nLong.MAX_UNSIGNED_VALUE = MAX_UNSIGNED_VALUE;\r\n\r\n/**\r\n * @type {!Long}\r\n * @inner\r\n */\r\nvar MIN_VALUE = fromBits(0, 0x80000000|0, false);\r\n\r\n/**\r\n * Minimum signed value.\r\n * @type {!Long}\r\n */\r\nLong.MIN_VALUE = MIN_VALUE;\r\n\r\n/**\r\n * @alias Long.prototype\r\n * @inner\r\n */\r\nvar LongPrototype = Long.prototype;\r\n\r\n/**\r\n * Converts the Long to a 32 bit integer, assuming it is a 32 bit integer.\r\n * @returns {number}\r\n */\r\nLongPrototype.toInt = function toInt() {\r\n return this.unsigned ? 
this.low >>> 0 : this.low;\r\n};\r\n\r\n/**\r\n * Converts the Long to a the nearest floating-point representation of this value (double, 53 bit mantissa).\r\n * @returns {number}\r\n */\r\nLongPrototype.toNumber = function toNumber() {\r\n if (this.unsigned)\r\n return ((this.high >>> 0) * TWO_PWR_32_DBL) + (this.low >>> 0);\r\n return this.high * TWO_PWR_32_DBL + (this.low >>> 0);\r\n};\r\n\r\n/**\r\n * Converts the Long to a string written in the specified radix.\r\n * @param {number=} radix Radix (2-36), defaults to 10\r\n * @returns {string}\r\n * @override\r\n * @throws {RangeError} If `radix` is out of range\r\n */\r\nLongPrototype.toString = function toString(radix) {\r\n radix = radix || 10;\r\n if (radix < 2 || 36 < radix)\r\n throw RangeError('radix');\r\n if (this.isZero())\r\n return '0';\r\n if (this.isNegative()) { // Unsigned Longs are never negative\r\n if (this.eq(MIN_VALUE)) {\r\n // We need to change the Long value before it can be negated, so we remove\r\n // the bottom-most digit in this base and then recurse to do the rest.\r\n var radixLong = fromNumber(radix),\r\n div = this.div(radixLong),\r\n rem1 = div.mul(radixLong).sub(this);\r\n return div.toString(radix) + rem1.toInt().toString(radix);\r\n } else\r\n return '-' + this.neg().toString(radix);\r\n }\r\n\r\n // Do several (6) digits each time through the loop, so as to\r\n // minimize the calls to the very expensive emulated div.\r\n var radixToPower = fromNumber(pow_dbl(radix, 6), this.unsigned),\r\n rem = this;\r\n var result = '';\r\n while (true) {\r\n var remDiv = rem.div(radixToPower),\r\n intval = rem.sub(remDiv.mul(radixToPower)).toInt() >>> 0,\r\n digits = intval.toString(radix);\r\n rem = remDiv;\r\n if (rem.isZero())\r\n return digits + result;\r\n else {\r\n while (digits.length < 6)\r\n digits = '0' + digits;\r\n result = '' + digits + result;\r\n }\r\n }\r\n};\r\n\r\n/**\r\n * Gets the high 32 bits as a signed integer.\r\n * @returns {number} Signed high bits\r\n */\r\nLongPrototype.getHighBits = function getHighBits() {\r\n return this.high;\r\n};\r\n\r\n/**\r\n * Gets the high 32 bits as an unsigned integer.\r\n * @returns {number} Unsigned high bits\r\n */\r\nLongPrototype.getHighBitsUnsigned = function getHighBitsUnsigned() {\r\n return this.high >>> 0;\r\n};\r\n\r\n/**\r\n * Gets the low 32 bits as a signed integer.\r\n * @returns {number} Signed low bits\r\n */\r\nLongPrototype.getLowBits = function getLowBits() {\r\n return this.low;\r\n};\r\n\r\n/**\r\n * Gets the low 32 bits as an unsigned integer.\r\n * @returns {number} Unsigned low bits\r\n */\r\nLongPrototype.getLowBitsUnsigned = function getLowBitsUnsigned() {\r\n return this.low >>> 0;\r\n};\r\n\r\n/**\r\n * Gets the number of bits needed to represent the absolute value of this Long.\r\n * @returns {number}\r\n */\r\nLongPrototype.getNumBitsAbs = function getNumBitsAbs() {\r\n if (this.isNegative()) // Unsigned Longs are never negative\r\n return this.eq(MIN_VALUE) ? 64 : this.neg().getNumBitsAbs();\r\n var val = this.high != 0 ? this.high : this.low;\r\n for (var bit = 31; bit > 0; bit--)\r\n if ((val & (1 << bit)) != 0)\r\n break;\r\n return this.high != 0 ? bit + 33 : bit + 1;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value equals zero.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.isZero = function isZero() {\r\n return this.high === 0 && this.low === 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value equals zero. 
This is an alias of {@link Long#isZero}.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.eqz = LongPrototype.isZero;\r\n\r\n/**\r\n * Tests if this Long's value is negative.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.isNegative = function isNegative() {\r\n return !this.unsigned && this.high < 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is positive.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.isPositive = function isPositive() {\r\n return this.unsigned || this.high >= 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is odd.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.isOdd = function isOdd() {\r\n return (this.low & 1) === 1;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is even.\r\n * @returns {boolean}\r\n */\r\nLongPrototype.isEven = function isEven() {\r\n return (this.low & 1) === 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value equals the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.equals = function equals(other) {\r\n if (!isLong(other))\r\n other = fromValue(other);\r\n if (this.unsigned !== other.unsigned && (this.high >>> 31) === 1 && (other.high >>> 31) === 1)\r\n return false;\r\n return this.high === other.high && this.low === other.low;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value equals the specified's. This is an alias of {@link Long#equals}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.eq = LongPrototype.equals;\r\n\r\n/**\r\n * Tests if this Long's value differs from the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.notEquals = function notEquals(other) {\r\n return !this.eq(/* validates */ other);\r\n};\r\n\r\n/**\r\n * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.neq = LongPrototype.notEquals;\r\n\r\n/**\r\n * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.ne = LongPrototype.notEquals;\r\n\r\n/**\r\n * Tests if this Long's value is less than the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.lessThan = function lessThan(other) {\r\n return this.comp(/* validates */ other) < 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is less than the specified's. This is an alias of {@link Long#lessThan}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.lt = LongPrototype.lessThan;\r\n\r\n/**\r\n * Tests if this Long's value is less than or equal the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.lessThanOrEqual = function lessThanOrEqual(other) {\r\n return this.comp(/* validates */ other) <= 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.lte = LongPrototype.lessThanOrEqual;\r\n\r\n/**\r\n * Tests if this Long's value is less than or equal the specified's. 
This is an alias of {@link Long#lessThanOrEqual}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.le = LongPrototype.lessThanOrEqual;\r\n\r\n/**\r\n * Tests if this Long's value is greater than the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.greaterThan = function greaterThan(other) {\r\n return this.comp(/* validates */ other) > 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is greater than the specified's. This is an alias of {@link Long#greaterThan}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.gt = LongPrototype.greaterThan;\r\n\r\n/**\r\n * Tests if this Long's value is greater than or equal the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.greaterThanOrEqual = function greaterThanOrEqual(other) {\r\n return this.comp(/* validates */ other) >= 0;\r\n};\r\n\r\n/**\r\n * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.gte = LongPrototype.greaterThanOrEqual;\r\n\r\n/**\r\n * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {boolean}\r\n */\r\nLongPrototype.ge = LongPrototype.greaterThanOrEqual;\r\n\r\n/**\r\n * Compares this Long's value with the specified's.\r\n * @param {!Long|number|string} other Other value\r\n * @returns {number} 0 if they are the same, 1 if the this is greater and -1\r\n * if the given one is greater\r\n */\r\nLongPrototype.compare = function compare(other) {\r\n if (!isLong(other))\r\n other = fromValue(other);\r\n if (this.eq(other))\r\n return 0;\r\n var thisNeg = this.isNegative(),\r\n otherNeg = other.isNegative();\r\n if (thisNeg && !otherNeg)\r\n return -1;\r\n if (!thisNeg && otherNeg)\r\n return 1;\r\n // At this point the sign bits are the same\r\n if (!this.unsigned)\r\n return this.sub(other).isNegative() ? -1 : 1;\r\n // Both are positive if at least one is unsigned\r\n return (other.high >>> 0) > (this.high >>> 0) || (other.high === this.high && (other.low >>> 0) > (this.low >>> 0)) ? -1 : 1;\r\n};\r\n\r\n/**\r\n * Compares this Long's value with the specified's. This is an alias of {@link Long#compare}.\r\n * @function\r\n * @param {!Long|number|string} other Other value\r\n * @returns {number} 0 if they are the same, 1 if the this is greater and -1\r\n * if the given one is greater\r\n */\r\nLongPrototype.comp = LongPrototype.compare;\r\n\r\n/**\r\n * Negates this Long's value.\r\n * @returns {!Long} Negated Long\r\n */\r\nLongPrototype.negate = function negate() {\r\n if (!this.unsigned && this.eq(MIN_VALUE))\r\n return MIN_VALUE;\r\n return this.not().add(ONE);\r\n};\r\n\r\n/**\r\n * Negates this Long's value. 
This is an alias of {@link Long#negate}.\r\n * @function\r\n * @returns {!Long} Negated Long\r\n */\r\nLongPrototype.neg = LongPrototype.negate;\r\n\r\n/**\r\n * Returns the sum of this and the specified Long.\r\n * @param {!Long|number|string} addend Addend\r\n * @returns {!Long} Sum\r\n */\r\nLongPrototype.add = function add(addend) {\r\n if (!isLong(addend))\r\n addend = fromValue(addend);\r\n\r\n // Divide each number into 4 chunks of 16 bits, and then sum the chunks.\r\n\r\n var a48 = this.high >>> 16;\r\n var a32 = this.high & 0xFFFF;\r\n var a16 = this.low >>> 16;\r\n var a00 = this.low & 0xFFFF;\r\n\r\n var b48 = addend.high >>> 16;\r\n var b32 = addend.high & 0xFFFF;\r\n var b16 = addend.low >>> 16;\r\n var b00 = addend.low & 0xFFFF;\r\n\r\n var c48 = 0, c32 = 0, c16 = 0, c00 = 0;\r\n c00 += a00 + b00;\r\n c16 += c00 >>> 16;\r\n c00 &= 0xFFFF;\r\n c16 += a16 + b16;\r\n c32 += c16 >>> 16;\r\n c16 &= 0xFFFF;\r\n c32 += a32 + b32;\r\n c48 += c32 >>> 16;\r\n c32 &= 0xFFFF;\r\n c48 += a48 + b48;\r\n c48 &= 0xFFFF;\r\n return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns the difference of this and the specified Long.\r\n * @param {!Long|number|string} subtrahend Subtrahend\r\n * @returns {!Long} Difference\r\n */\r\nLongPrototype.subtract = function subtract(subtrahend) {\r\n if (!isLong(subtrahend))\r\n subtrahend = fromValue(subtrahend);\r\n return this.add(subtrahend.neg());\r\n};\r\n\r\n/**\r\n * Returns the difference of this and the specified Long. This is an alias of {@link Long#subtract}.\r\n * @function\r\n * @param {!Long|number|string} subtrahend Subtrahend\r\n * @returns {!Long} Difference\r\n */\r\nLongPrototype.sub = LongPrototype.subtract;\r\n\r\n/**\r\n * Returns the product of this and the specified Long.\r\n * @param {!Long|number|string} multiplier Multiplier\r\n * @returns {!Long} Product\r\n */\r\nLongPrototype.multiply = function multiply(multiplier) {\r\n if (this.isZero())\r\n return ZERO;\r\n if (!isLong(multiplier))\r\n multiplier = fromValue(multiplier);\r\n\r\n // use wasm support if present\r\n if (wasm) {\r\n var low = wasm.mul(this.low,\r\n this.high,\r\n multiplier.low,\r\n multiplier.high);\r\n return fromBits(low, wasm.get_high(), this.unsigned);\r\n }\r\n\r\n if (multiplier.isZero())\r\n return ZERO;\r\n if (this.eq(MIN_VALUE))\r\n return multiplier.isOdd() ? MIN_VALUE : ZERO;\r\n if (multiplier.eq(MIN_VALUE))\r\n return this.isOdd() ? 
MIN_VALUE : ZERO;\r\n\r\n if (this.isNegative()) {\r\n if (multiplier.isNegative())\r\n return this.neg().mul(multiplier.neg());\r\n else\r\n return this.neg().mul(multiplier).neg();\r\n } else if (multiplier.isNegative())\r\n return this.mul(multiplier.neg()).neg();\r\n\r\n // If both longs are small, use float multiplication\r\n if (this.lt(TWO_PWR_24) && multiplier.lt(TWO_PWR_24))\r\n return fromNumber(this.toNumber() * multiplier.toNumber(), this.unsigned);\r\n\r\n // Divide each long into 4 chunks of 16 bits, and then add up 4x4 products.\r\n // We can skip products that would overflow.\r\n\r\n var a48 = this.high >>> 16;\r\n var a32 = this.high & 0xFFFF;\r\n var a16 = this.low >>> 16;\r\n var a00 = this.low & 0xFFFF;\r\n\r\n var b48 = multiplier.high >>> 16;\r\n var b32 = multiplier.high & 0xFFFF;\r\n var b16 = multiplier.low >>> 16;\r\n var b00 = multiplier.low & 0xFFFF;\r\n\r\n var c48 = 0, c32 = 0, c16 = 0, c00 = 0;\r\n c00 += a00 * b00;\r\n c16 += c00 >>> 16;\r\n c00 &= 0xFFFF;\r\n c16 += a16 * b00;\r\n c32 += c16 >>> 16;\r\n c16 &= 0xFFFF;\r\n c16 += a00 * b16;\r\n c32 += c16 >>> 16;\r\n c16 &= 0xFFFF;\r\n c32 += a32 * b00;\r\n c48 += c32 >>> 16;\r\n c32 &= 0xFFFF;\r\n c32 += a16 * b16;\r\n c48 += c32 >>> 16;\r\n c32 &= 0xFFFF;\r\n c32 += a00 * b32;\r\n c48 += c32 >>> 16;\r\n c32 &= 0xFFFF;\r\n c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;\r\n c48 &= 0xFFFF;\r\n return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns the product of this and the specified Long. This is an alias of {@link Long#multiply}.\r\n * @function\r\n * @param {!Long|number|string} multiplier Multiplier\r\n * @returns {!Long} Product\r\n */\r\nLongPrototype.mul = LongPrototype.multiply;\r\n\r\n/**\r\n * Returns this Long divided by the specified. The result is signed if this Long is signed or\r\n * unsigned if this Long is unsigned.\r\n * @param {!Long|number|string} divisor Divisor\r\n * @returns {!Long} Quotient\r\n */\r\nLongPrototype.divide = function divide(divisor) {\r\n if (!isLong(divisor))\r\n divisor = fromValue(divisor);\r\n if (divisor.isZero())\r\n throw Error('division by zero');\r\n\r\n // use wasm support if present\r\n if (wasm) {\r\n // guard against signed division overflow: the largest\r\n // negative number / -1 would be 1 larger than the largest\r\n // positive number, due to two's complement.\r\n if (!this.unsigned &&\r\n this.high === -0x80000000 &&\r\n divisor.low === -1 && divisor.high === -1) {\r\n // be consistent with non-wasm code path\r\n return this;\r\n }\r\n var low = (this.unsigned ? wasm.div_u : wasm.div_s)(\r\n this.low,\r\n this.high,\r\n divisor.low,\r\n divisor.high\r\n );\r\n return fromBits(low, wasm.get_high(), this.unsigned);\r\n }\r\n\r\n if (this.isZero())\r\n return this.unsigned ? UZERO : ZERO;\r\n var approx, rem, res;\r\n if (!this.unsigned) {\r\n // This section is only relevant for signed longs and is derived from the\r\n // closure library as a whole.\r\n if (this.eq(MIN_VALUE)) {\r\n if (divisor.eq(ONE) || divisor.eq(NEG_ONE))\r\n return MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE\r\n else if (divisor.eq(MIN_VALUE))\r\n return ONE;\r\n else {\r\n // At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.\r\n var halfThis = this.shr(1);\r\n approx = halfThis.div(divisor).shl(1);\r\n if (approx.eq(ZERO)) {\r\n return divisor.isNegative() ? 
ONE : NEG_ONE;\r\n } else {\r\n rem = this.sub(divisor.mul(approx));\r\n res = approx.add(rem.div(divisor));\r\n return res;\r\n }\r\n }\r\n } else if (divisor.eq(MIN_VALUE))\r\n return this.unsigned ? UZERO : ZERO;\r\n if (this.isNegative()) {\r\n if (divisor.isNegative())\r\n return this.neg().div(divisor.neg());\r\n return this.neg().div(divisor).neg();\r\n } else if (divisor.isNegative())\r\n return this.div(divisor.neg()).neg();\r\n res = ZERO;\r\n } else {\r\n // The algorithm below has not been made for unsigned longs. It's therefore\r\n // required to take special care of the MSB prior to running it.\r\n if (!divisor.unsigned)\r\n divisor = divisor.toUnsigned();\r\n if (divisor.gt(this))\r\n return UZERO;\r\n if (divisor.gt(this.shru(1))) // 15 >>> 1 = 7 ; with divisor = 8 ; true\r\n return UONE;\r\n res = UZERO;\r\n }\r\n\r\n // Repeat the following until the remainder is less than other: find a\r\n // floating-point that approximates remainder / other *from below*, add this\r\n // into the result, and subtract it from the remainder. It is critical that\r\n // the approximate value is less than or equal to the real value so that the\r\n // remainder never becomes negative.\r\n rem = this;\r\n while (rem.gte(divisor)) {\r\n // Approximate the result of division. This may be a little greater or\r\n // smaller than the actual value.\r\n approx = Math.max(1, Math.floor(rem.toNumber() / divisor.toNumber()));\r\n\r\n // We will tweak the approximate result by changing it in the 48-th digit or\r\n // the smallest non-fractional digit, whichever is larger.\r\n var log2 = Math.ceil(Math.log(approx) / Math.LN2),\r\n delta = (log2 <= 48) ? 1 : pow_dbl(2, log2 - 48),\r\n\r\n // Decrease the approximation until it is smaller than the remainder. Note\r\n // that if it is too large, the product overflows and is negative.\r\n approxRes = fromNumber(approx),\r\n approxRem = approxRes.mul(divisor);\r\n while (approxRem.isNegative() || approxRem.gt(rem)) {\r\n approx -= delta;\r\n approxRes = fromNumber(approx, this.unsigned);\r\n approxRem = approxRes.mul(divisor);\r\n }\r\n\r\n // We know the answer can't be zero... and actually, zero would cause\r\n // infinite recursion since we would make no progress.\r\n if (approxRes.isZero())\r\n approxRes = ONE;\r\n\r\n res = res.add(approxRes);\r\n rem = rem.sub(approxRem);\r\n }\r\n return res;\r\n};\r\n\r\n/**\r\n * Returns this Long divided by the specified. This is an alias of {@link Long#divide}.\r\n * @function\r\n * @param {!Long|number|string} divisor Divisor\r\n * @returns {!Long} Quotient\r\n */\r\nLongPrototype.div = LongPrototype.divide;\r\n\r\n/**\r\n * Returns this Long modulo the specified.\r\n * @param {!Long|number|string} divisor Divisor\r\n * @returns {!Long} Remainder\r\n */\r\nLongPrototype.modulo = function modulo(divisor) {\r\n if (!isLong(divisor))\r\n divisor = fromValue(divisor);\r\n\r\n // use wasm support if present\r\n if (wasm) {\r\n var low = (this.unsigned ? wasm.rem_u : wasm.rem_s)(\r\n this.low,\r\n this.high,\r\n divisor.low,\r\n divisor.high\r\n );\r\n return fromBits(low, wasm.get_high(), this.unsigned);\r\n }\r\n\r\n return this.sub(this.div(divisor).mul(divisor));\r\n};\r\n\r\n/**\r\n * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}.\r\n * @function\r\n * @param {!Long|number|string} divisor Divisor\r\n * @returns {!Long} Remainder\r\n */\r\nLongPrototype.mod = LongPrototype.modulo;\r\n\r\n/**\r\n * Returns this Long modulo the specified. 
This is an alias of {@link Long#modulo}.\r\n * @function\r\n * @param {!Long|number|string} divisor Divisor\r\n * @returns {!Long} Remainder\r\n */\r\nLongPrototype.rem = LongPrototype.modulo;\r\n\r\n/**\r\n * Returns the bitwise NOT of this Long.\r\n * @returns {!Long}\r\n */\r\nLongPrototype.not = function not() {\r\n return fromBits(~this.low, ~this.high, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns the bitwise AND of this Long and the specified.\r\n * @param {!Long|number|string} other Other Long\r\n * @returns {!Long}\r\n */\r\nLongPrototype.and = function and(other) {\r\n if (!isLong(other))\r\n other = fromValue(other);\r\n return fromBits(this.low & other.low, this.high & other.high, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns the bitwise OR of this Long and the specified.\r\n * @param {!Long|number|string} other Other Long\r\n * @returns {!Long}\r\n */\r\nLongPrototype.or = function or(other) {\r\n if (!isLong(other))\r\n other = fromValue(other);\r\n return fromBits(this.low | other.low, this.high | other.high, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns the bitwise XOR of this Long and the given one.\r\n * @param {!Long|number|string} other Other Long\r\n * @returns {!Long}\r\n */\r\nLongPrototype.xor = function xor(other) {\r\n if (!isLong(other))\r\n other = fromValue(other);\r\n return fromBits(this.low ^ other.low, this.high ^ other.high, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns this Long with bits shifted to the left by the given amount.\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shiftLeft = function shiftLeft(numBits) {\r\n if (isLong(numBits))\r\n numBits = numBits.toInt();\r\n if ((numBits &= 63) === 0)\r\n return this;\r\n else if (numBits < 32)\r\n return fromBits(this.low << numBits, (this.high << numBits) | (this.low >>> (32 - numBits)), this.unsigned);\r\n else\r\n return fromBits(0, this.low << (numBits - 32), this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns this Long with bits shifted to the left by the given amount. This is an alias of {@link Long#shiftLeft}.\r\n * @function\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shl = LongPrototype.shiftLeft;\r\n\r\n/**\r\n * Returns this Long with bits arithmetically shifted to the right by the given amount.\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shiftRight = function shiftRight(numBits) {\r\n if (isLong(numBits))\r\n numBits = numBits.toInt();\r\n if ((numBits &= 63) === 0)\r\n return this;\r\n else if (numBits < 32)\r\n return fromBits((this.low >>> numBits) | (this.high << (32 - numBits)), this.high >> numBits, this.unsigned);\r\n else\r\n return fromBits(this.high >> (numBits - 32), this.high >= 0 ? 0 : -1, this.unsigned);\r\n};\r\n\r\n/**\r\n * Returns this Long with bits arithmetically shifted to the right by the given amount. 
This is an alias of {@link Long#shiftRight}.\r\n * @function\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shr = LongPrototype.shiftRight;\r\n\r\n/**\r\n * Returns this Long with bits logically shifted to the right by the given amount.\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shiftRightUnsigned = function shiftRightUnsigned(numBits) {\r\n if (isLong(numBits))\r\n numBits = numBits.toInt();\r\n numBits &= 63;\r\n if (numBits === 0)\r\n return this;\r\n else {\r\n var high = this.high;\r\n if (numBits < 32) {\r\n var low = this.low;\r\n return fromBits((low >>> numBits) | (high << (32 - numBits)), high >>> numBits, this.unsigned);\r\n } else if (numBits === 32)\r\n return fromBits(high, 0, this.unsigned);\r\n else\r\n return fromBits(high >>> (numBits - 32), 0, this.unsigned);\r\n }\r\n};\r\n\r\n/**\r\n * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.\r\n * @function\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shru = LongPrototype.shiftRightUnsigned;\r\n\r\n/**\r\n * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.\r\n * @function\r\n * @param {number|!Long} numBits Number of bits\r\n * @returns {!Long} Shifted Long\r\n */\r\nLongPrototype.shr_u = LongPrototype.shiftRightUnsigned;\r\n\r\n/**\r\n * Converts this Long to signed.\r\n * @returns {!Long} Signed long\r\n */\r\nLongPrototype.toSigned = function toSigned() {\r\n if (!this.unsigned)\r\n return this;\r\n return fromBits(this.low, this.high, false);\r\n};\r\n\r\n/**\r\n * Converts this Long to unsigned.\r\n * @returns {!Long} Unsigned long\r\n */\r\nLongPrototype.toUnsigned = function toUnsigned() {\r\n if (this.unsigned)\r\n return this;\r\n return fromBits(this.low, this.high, true);\r\n};\r\n\r\n/**\r\n * Converts this Long to its byte representation.\r\n * @param {boolean=} le Whether little or big endian, defaults to big endian\r\n * @returns {!Array.} Byte representation\r\n */\r\nLongPrototype.toBytes = function toBytes(le) {\r\n return le ? 
this.toBytesLE() : this.toBytesBE();\r\n};\r\n\r\n/**\r\n * Converts this Long to its little endian byte representation.\r\n * @returns {!Array.} Little endian byte representation\r\n */\r\nLongPrototype.toBytesLE = function toBytesLE() {\r\n var hi = this.high,\r\n lo = this.low;\r\n return [\r\n lo & 0xff,\r\n lo >>> 8 & 0xff,\r\n lo >>> 16 & 0xff,\r\n lo >>> 24 ,\r\n hi & 0xff,\r\n hi >>> 8 & 0xff,\r\n hi >>> 16 & 0xff,\r\n hi >>> 24\r\n ];\r\n};\r\n\r\n/**\r\n * Converts this Long to its big endian byte representation.\r\n * @returns {!Array.} Big endian byte representation\r\n */\r\nLongPrototype.toBytesBE = function toBytesBE() {\r\n var hi = this.high,\r\n lo = this.low;\r\n return [\r\n hi >>> 24 ,\r\n hi >>> 16 & 0xff,\r\n hi >>> 8 & 0xff,\r\n hi & 0xff,\r\n lo >>> 24 ,\r\n lo >>> 16 & 0xff,\r\n lo >>> 8 & 0xff,\r\n lo & 0xff\r\n ];\r\n};\r\n\r\n/**\r\n * Creates a Long from its byte representation.\r\n * @param {!Array.} bytes Byte representation\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @param {boolean=} le Whether little or big endian, defaults to big endian\r\n * @returns {Long} The corresponding Long value\r\n */\r\nLong.fromBytes = function fromBytes(bytes, unsigned, le) {\r\n return le ? Long.fromBytesLE(bytes, unsigned) : Long.fromBytesBE(bytes, unsigned);\r\n};\r\n\r\n/**\r\n * Creates a Long from its little endian byte representation.\r\n * @param {!Array.} bytes Little endian byte representation\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {Long} The corresponding Long value\r\n */\r\nLong.fromBytesLE = function fromBytesLE(bytes, unsigned) {\r\n return new Long(\r\n bytes[0] |\r\n bytes[1] << 8 |\r\n bytes[2] << 16 |\r\n bytes[3] << 24,\r\n bytes[4] |\r\n bytes[5] << 8 |\r\n bytes[6] << 16 |\r\n bytes[7] << 24,\r\n unsigned\r\n );\r\n};\r\n\r\n/**\r\n * Creates a Long from its big endian byte representation.\r\n * @param {!Array.} bytes Big endian byte representation\r\n * @param {boolean=} unsigned Whether unsigned or not, defaults to signed\r\n * @returns {Long} The corresponding Long value\r\n */\r\nLong.fromBytesBE = function fromBytesBE(bytes, unsigned) {\r\n return new Long(\r\n bytes[4] << 24 |\r\n bytes[5] << 16 |\r\n bytes[6] << 8 |\r\n bytes[7],\r\n bytes[0] << 24 |\r\n bytes[1] << 16 |\r\n bytes[2] << 8 |\r\n bytes[3],\r\n unsigned\r\n );\r\n};\r\n", "", "// A port of an algorithm by Johannes Baag\u00F8e , 2010\n// http://baagoe.com/en/RandomMusings/javascript/\n// https://github.com/nquinlan/better-random-numbers-for-javascript-mirror\n// Original work is under MIT license -\n\n// Copyright (C) 2010 by Johannes Baag\u00F8e \n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n// \n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n// \n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\n\n\n(function(global, module, define) {\n\nfunction Alea(seed) {\n var me = this, mash = Mash();\n\n me.next = function() {\n var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32\n me.s0 = me.s1;\n me.s1 = me.s2;\n return me.s2 = t - (me.c = t | 0);\n };\n\n // Apply the seeding algorithm from Baagoe.\n me.c = 1;\n me.s0 = mash(' ');\n me.s1 = mash(' ');\n me.s2 = mash(' ');\n me.s0 -= mash(seed);\n if (me.s0 < 0) { me.s0 += 1; }\n me.s1 -= mash(seed);\n if (me.s1 < 0) { me.s1 += 1; }\n me.s2 -= mash(seed);\n if (me.s2 < 0) { me.s2 += 1; }\n mash = null;\n}\n\nfunction copy(f, t) {\n t.c = f.c;\n t.s0 = f.s0;\n t.s1 = f.s1;\n t.s2 = f.s2;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new Alea(seed),\n state = opts && opts.state,\n prng = xg.next;\n prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }\n prng.double = function() {\n return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53\n };\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nfunction Mash() {\n var n = 0xefc8249d;\n\n var mash = function(data) {\n data = data.toString();\n for (var i = 0; i < data.length; i++) {\n n += data.charCodeAt(i);\n var h = 0.02519603282416938 * n;\n n = h >>> 0;\n h -= n;\n h *= n;\n n = h >>> 0;\n h -= n;\n n += h * 0x100000000; // 2^32\n }\n return (n >>> 0) * 2.3283064365386963e-10; // 2^-32\n };\n\n return mash;\n}\n\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.alea = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xor128\" prng algorithm by\n// George Marsaglia. 
See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n\n // Set up generator function.\n me.next = function() {\n var t = me.x ^ (me.x << 11);\n me.x = me.y;\n me.y = me.z;\n me.z = me.w;\n return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);\n };\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor128 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorwow\" prng algorithm by\n// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var t = (me.x ^ (me.x >>> 2));\n me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v;\n return (me.d = (me.d + 362437 | 0)) +\n (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;\n };\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n me.v = 0;\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n if (k == strseed.length) {\n me.d = me.x << 10 ^ me.x >>> 4;\n }\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n t.v = f.v;\n t.d = f.d;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorwow = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorshift7\" 
algorithm by\n// Fran\u00E7ois Panneton and Pierre L'ecuyer:\n// \"On the Xorgshift Random Number Generators\"\n// http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n // Update xor generator.\n var X = me.x, i = me.i, t, v, w;\n t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24);\n t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10);\n t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3);\n t = X[(i + 4) & 7]; v ^= t ^ (t << 7);\n t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9);\n X[i] = v;\n me.i = (i + 1) & 7;\n return v;\n };\n\n function init(me, seed) {\n var j, w, X = [];\n\n if (seed === (seed | 0)) {\n // Seed state array using a 32-bit integer.\n w = X[0] = seed;\n } else {\n // Seed state using a string.\n seed = '' + seed;\n for (j = 0; j < seed.length; ++j) {\n X[j & 7] = (X[j & 7] << 15) ^\n (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);\n }\n }\n // Enforce an array length of 8, not all zeroes.\n while (X.length < 8) X.push(0);\n for (j = 0; j < 8 && X[j] === 0; ++j);\n if (j == 8) w = X[7] = -1; else w = X[j];\n\n me.x = X;\n me.i = 0;\n\n // Discard an initial 256 values.\n for (j = 256; j > 0; --j) {\n me.next();\n }\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.x = f.x.slice();\n t.i = f.i;\n return t;\n}\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.x) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorshift7 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n", "// A Javascript implementaion of Richard Brent's Xorgens xor4096 algorithm.\n//\n// This fast non-cryptographic random number generator is designed for\n// use in Monte-Carlo algorithms. It combines a long-period xorshift\n// generator with a Weyl generator, and it passes all common batteries\n// of stasticial tests for randomness while consuming only a few nanoseconds\n// for each prng generated. For background on the generator, see Brent's\n// paper: \"Some long-period random number generators using shifts and xors.\"\n// http://arxiv.org/pdf/1004.3115v1.pdf\n//\n// Usage:\n//\n// var xor4096 = require('xor4096');\n// random = xor4096(1); // Seed with int32 or string.\n// assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.\n// assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.\n//\n// For nonzero numeric keys, this impelementation provides a sequence\n// identical to that by Brent's xorgens 3 implementaion in C. 
This\n// implementation also provides for initalizing the generator with\n// string seeds, or for saving and restoring the state of the generator.\n//\n// On Chrome, this prng benchmarks about 2.1 times slower than\n// Javascript's built-in Math.random().\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n var w = me.w,\n X = me.X, i = me.i, t, v;\n // Update Weyl generator.\n me.w = w = (w + 0x61c88647) | 0;\n // Update xor generator.\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n // Update Xor generator array state.\n v = X[i] = v ^ t;\n me.i = i;\n // Result is the combination.\n return (v + (w ^ (w >>> 16))) | 0;\n };\n\n function init(me, seed) {\n var t, v, i, j, w, X = [], limit = 128;\n if (seed === (seed | 0)) {\n // Numeric seeds initialize v, which is used to generates X.\n v = seed;\n seed = null;\n } else {\n // String seeds are mixed into v and X one character at a time.\n seed = seed + '\\0';\n v = 0;\n limit = Math.max(limit, seed.length);\n }\n // Initialize circular array and weyl value.\n for (i = 0, j = -32; j < limit; ++j) {\n // Put the unicode characters into the array, and shuffle them.\n if (seed) v ^= seed.charCodeAt((j + 32) % seed.length);\n // After 32 shuffles, take v as the starting w value.\n if (j === 0) w = v;\n v ^= v << 10;\n v ^= v >>> 15;\n v ^= v << 4;\n v ^= v >>> 13;\n if (j >= 0) {\n w = (w + 0x61c88647) | 0; // Weyl.\n t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.\n i = (0 == t) ? i + 1 : 0; // Count zeroes.\n }\n }\n // We have detected all zeroes; make the key nonzero.\n if (i >= 128) {\n X[(seed && seed.length || 0) & 127] = -1;\n }\n // Run the generator 512 times to further mix the state before using it.\n // Factoring this as a function slows the main generator, so it is just\n // unrolled here. 
The weyl generator is not advanced while warming up.\n i = 127;\n for (j = 4 * 128; j > 0; --j) {\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n X[i] = v ^ t;\n }\n // Storing state as object members is faster than using closure variables.\n me.w = w;\n me.X = X;\n me.i = i;\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.i = f.i;\n t.w = f.w;\n t.X = f.X.slice();\n return t;\n};\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.X) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor4096 = impl;\n}\n\n})(\n this, // window object or global\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n", "// A Javascript implementaion of the \"Tyche-i\" prng algorithm by\n// Samuel Neves and Filipe Araujo.\n// See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var b = me.b, c = me.c, d = me.d, a = me.a;\n b = (b << 25) ^ (b >>> 7) ^ c;\n c = (c - d) | 0;\n d = (d << 24) ^ (d >>> 8) ^ a;\n a = (a - b) | 0;\n me.b = b = (b << 20) ^ (b >>> 12) ^ c;\n me.c = c = (c - d) | 0;\n me.d = (d << 16) ^ (c >>> 16) ^ a;\n return me.a = (a - b) | 0;\n };\n\n /* The following is non-inverted tyche, which has better internal\n * bit diffusion, but which is about 25% slower than tyche-i in JS.\n me.next = function() {\n var a = me.a, b = me.b, c = me.c, d = me.d;\n a = (me.a + me.b | 0) >>> 0;\n d = me.d ^ a; d = d << 16 ^ d >>> 16;\n c = me.c + d | 0;\n b = me.b ^ c; b = b << 12 ^ d >>> 20;\n me.a = a = a + b | 0;\n d = d ^ a; me.d = d = d << 8 ^ d >>> 24;\n me.c = c = c + d | 0;\n b = b ^ c;\n return me.b = (b << 7 ^ b >>> 25);\n }\n */\n\n me.a = 0;\n me.b = 0;\n me.c = 2654435769 | 0;\n me.d = 1367130551;\n\n if (seed === Math.floor(seed)) {\n // Integer seed.\n me.a = (seed / 0x100000000) | 0;\n me.b = seed | 0;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 20; k++) {\n me.b ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.a = f.a;\n t.b = f.b;\n t.c = f.c;\n t.d = f.d;\n return t;\n};\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n 
module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.tychei = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "", "/*\nCopyright 2014 David Bau.\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n*/\n\n(function (pool, math) {\n//\n// The following constants are related to IEEE 754 limits.\n//\nvar global = this,\n width = 256, // each RC4 output is 0 <= x < 256\n chunks = 6, // at least six RC4 outputs for each double\n digits = 52, // there are 52 significant digits in a double\n rngname = 'random', // rngname: name for Math.random and Math.seedrandom\n startdenom = math.pow(width, chunks),\n significance = math.pow(2, digits),\n overflow = significance * 2,\n mask = width - 1,\n nodecrypto; // node.js crypto module, initialized at the bottom.\n\n//\n// seedrandom()\n// This is the seedrandom function described above.\n//\nfunction seedrandom(seed, options, callback) {\n var key = [];\n options = (options == true) ? { entropy: true } : (options || {});\n\n // Flatten the seed string or build one from local entropy if needed.\n var shortseed = mixkey(flatten(\n options.entropy ? [seed, tostring(pool)] :\n (seed == null) ? 
autoseed() : seed, 3), key);\n\n // Use the seed to initialize an ARC4 generator.\n var arc4 = new ARC4(key);\n\n // This function returns a random double in [0, 1) that contains\n // randomness in every bit of the mantissa of the IEEE 754 value.\n var prng = function() {\n var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48\n d = startdenom, // and denominator d = 2 ^ 48.\n x = 0; // and no 'extra last byte'.\n while (n < significance) { // Fill up all significant digits by\n n = (n + x) * width; // shifting numerator and\n d *= width; // denominator and generating a\n x = arc4.g(1); // new least-significant-byte.\n }\n while (n >= overflow) { // To avoid rounding up, before adding\n n /= 2; // last byte, shift everything\n d /= 2; // right using integer math until\n x >>>= 1; // we have exactly the desired bits.\n }\n return (n + x) / d; // Form the number within [0, 1).\n };\n\n prng.int32 = function() { return arc4.g(4) | 0; }\n prng.quick = function() { return arc4.g(4) / 0x100000000; }\n prng.double = prng;\n\n // Mix the randomness into accumulated entropy.\n mixkey(tostring(arc4.S), pool);\n\n // Calling convention: what to return as a function of prng, seed, is_math.\n return (options.pass || callback ||\n function(prng, seed, is_math_call, state) {\n if (state) {\n // Load the arc4 state from the given state if it has an S array.\n if (state.S) { copy(state, arc4); }\n // Only provide the .state method if requested via options.state.\n prng.state = function() { return copy(arc4, {}); }\n }\n\n // If called as a method of Math (Math.seedrandom()), mutate\n // Math.random because that is how seedrandom.js has worked since v1.0.\n if (is_math_call) { math[rngname] = prng; return seed; }\n\n // Otherwise, it is a newer calling convention, so return the\n // prng directly.\n else return prng;\n })(\n prng,\n shortseed,\n 'global' in options ? options.global : (this == math),\n options.state);\n}\nmath['seed' + rngname] = seedrandom;\n\n//\n// ARC4\n//\n// An ARC4 implementation. The constructor takes a key in the form of\n// an array of at most (width) integers that should be 0 <= x < (width).\n//\n// The g(count) method returns a pseudorandom integer that concatenates\n// the next (count) outputs from ARC4. Its return value is a number x\n// that is in the range 0 <= x < (width ^ count).\n//\nfunction ARC4(key) {\n var t, keylen = key.length,\n me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];\n\n // The empty key [] is treated as [0].\n if (!keylen) { key = [keylen++]; }\n\n // Set up S using the standard key scheduling algorithm.\n while (i < width) {\n s[i] = i++;\n }\n for (i = 0; i < width; i++) {\n s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];\n s[j] = t;\n }\n\n // The \"g\" method returns the next (count) outputs as one number.\n (me.g = function(count) {\n // Using instance members instead of closure state nearly doubles speed.\n var t, r = 0,\n i = me.i, j = me.j, s = me.S;\n while (count--) {\n t = s[i = mask & (i + 1)];\n r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];\n }\n me.i = i; me.j = j;\n return r;\n // For robust unpredictability, the function call below automatically\n // discards an initial batch of values. 
This is called RC4-drop[256].\n // See http://google.com/search?q=rsa+fluhrer+response&btnI\n })(width);\n}\n\n//\n// copy()\n// Copies internal state of ARC4 to or from a plain object.\n//\nfunction copy(f, t) {\n t.i = f.i;\n t.j = f.j;\n t.S = f.S.slice();\n return t;\n};\n\n//\n// flatten()\n// Converts an object tree to nested arrays of strings.\n//\nfunction flatten(obj, depth) {\n var result = [], typ = (typeof obj), prop;\n if (depth && typ == 'object') {\n for (prop in obj) {\n try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}\n }\n }\n return (result.length ? result : typ == 'string' ? obj : obj + '\\0');\n}\n\n//\n// mixkey()\n// Mixes a string seed into a key that is an array of integers, and\n// returns a shortened string seed that is equivalent to the result key.\n//\nfunction mixkey(seed, key) {\n var stringseed = seed + '', smear, j = 0;\n while (j < stringseed.length) {\n key[mask & j] =\n mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));\n }\n return tostring(key);\n}\n\n//\n// autoseed()\n// Returns an object for autoseeding, using window.crypto and Node crypto\n// module if available.\n//\nfunction autoseed() {\n try {\n var out;\n if (nodecrypto && (out = nodecrypto.randomBytes)) {\n // The use of 'out' to remember randomBytes makes tight minified code.\n out = out(width);\n } else {\n out = new Uint8Array(width);\n (global.crypto || global.msCrypto).getRandomValues(out);\n }\n return tostring(out);\n } catch (e) {\n var browser = global.navigator,\n plugins = browser && browser.plugins;\n return [+new Date, global, plugins, global.screen, tostring(pool)];\n }\n}\n\n//\n// tostring()\n// Converts an array of charcodes to a string\n//\nfunction tostring(a) {\n return String.fromCharCode.apply(0, a);\n}\n\n//\n// When seedrandom.js is loaded, we immediately mix a few bits\n// from the built-in RNG into the entropy pool. Because we do\n// not want to interfere with deterministic PRNG state later,\n// seedrandom will not call math.random on its own again after\n// initialization.\n//\nmixkey(math.random(), pool);\n\n//\n// Nodejs and AMD support: export the implementation as a module using\n// either convention.\n//\nif ((typeof module) == 'object' && module.exports) {\n module.exports = seedrandom;\n // When in node.js, try using crypto package for autoseeding.\n try {\n nodecrypto = require('crypto');\n } catch (ex) {}\n} else if ((typeof define) == 'function' && define.amd) {\n define(function() { return seedrandom; });\n}\n\n// End anonymous scope, and pass initial values.\n})(\n [], // pool: entropy pool starts empty\n Math // math: package containing random, pow, and seedrandom\n);\n", "// A library of seedable RNGs implemented in Javascript.\n//\n// Usage:\n//\n// var seedrandom = require('seedrandom');\n// var random = seedrandom(1); // or any seed.\n// var x = random(); // 0 <= x < 1. Every bit is random.\n// var x = random.quick(); // 0 <= x < 1. 
32 bits of randomness.\n\n// alea, a 53-bit multiply-with-carry generator by Johannes Baag\u00F8e.\n// Period: ~2^116\n// Reported to pass all BigCrush tests.\nvar alea = require('./lib/alea');\n\n// xor128, a pure xor-shift generator by George Marsaglia.\n// Period: 2^128-1.\n// Reported to fail: MatrixRank and LinearComp.\nvar xor128 = require('./lib/xor128');\n\n// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.\n// Period: 2^192-2^32\n// Reported to fail: CollisionOver, SimpPoker, and LinearComp.\nvar xorwow = require('./lib/xorwow');\n\n// xorshift7, by Fran\u00E7ois Panneton and Pierre L'ecuyer, takes\n// a different approach: it adds robustness by allowing more shifts\n// than Marsaglia's original three. It is a 7-shift generator\n// with 256 bits, that passes BigCrush with no systmatic failures.\n// Period 2^256-1.\n// No systematic BigCrush failures reported.\nvar xorshift7 = require('./lib/xorshift7');\n\n// xor4096, by Richard Brent, is a 4096-bit xor-shift with a\n// very long period that also adds a Weyl generator. It also passes\n// BigCrush with no systematic failures. Its long period may\n// be useful if you have many generators and need to avoid\n// collisions.\n// Period: 2^4128-2^32.\n// No systematic BigCrush failures reported.\nvar xor4096 = require('./lib/xor4096');\n\n// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random\n// number generator derived from ChaCha, a modern stream cipher.\n// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n// Period: ~2^127\n// No systematic BigCrush failures reported.\nvar tychei = require('./lib/tychei');\n\n// The original ARC4-based prng included in this library.\n// Period: ~2^1600\nvar sr = require('./seedrandom');\n\nsr.alea = alea;\nsr.xor128 = xor128;\nsr.xorwow = xorwow;\nsr.xorshift7 = xorshift7;\nsr.xor4096 = xor4096;\nsr.tychei = tychei;\n\nmodule.exports = sr;\n", "// A port of an algorithm by Johannes Baag\u00F8e , 2010\n// http://baagoe.com/en/RandomMusings/javascript/\n// https://github.com/nquinlan/better-random-numbers-for-javascript-mirror\n// Original work is under MIT license -\n\n// Copyright (C) 2010 by Johannes Baag\u00F8e \n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\n\n\n(function(global, module, define) {\n\nfunction Alea(seed) {\n var me = this, mash = Mash();\n\n me.next = function() {\n var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32\n me.s0 = me.s1;\n me.s1 = me.s2;\n return me.s2 = t - (me.c = t | 0);\n };\n\n // Apply the seeding algorithm from Baagoe.\n me.c = 1;\n me.s0 = mash(' ');\n me.s1 = mash(' ');\n me.s2 = mash(' ');\n me.s0 -= mash(seed);\n if (me.s0 < 0) { me.s0 += 1; }\n me.s1 -= mash(seed);\n if (me.s1 < 0) { me.s1 += 1; }\n me.s2 -= mash(seed);\n if (me.s2 < 0) { me.s2 += 1; }\n mash = null;\n}\n\nfunction copy(f, t) {\n t.c = f.c;\n t.s0 = f.s0;\n t.s1 = f.s1;\n t.s2 = f.s2;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new Alea(seed),\n state = opts && opts.state,\n prng = xg.next;\n prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }\n prng.double = function() {\n return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53\n };\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nfunction Mash() {\n var n = 0xefc8249d;\n\n var mash = function(data) {\n data = String(data);\n for (var i = 0; i < data.length; i++) {\n n += data.charCodeAt(i);\n var h = 0.02519603282416938 * n;\n n = h >>> 0;\n h -= n;\n h *= n;\n n = h >>> 0;\n h -= n;\n n += h * 0x100000000; // 2^32\n }\n return (n >>> 0) * 2.3283064365386963e-10; // 2^-32\n };\n\n return mash;\n}\n\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.alea = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xor128\" prng algorithm by\n// George Marsaglia. 
See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n\n // Set up generator function.\n me.next = function() {\n var t = me.x ^ (me.x << 11);\n me.x = me.y;\n me.y = me.z;\n me.z = me.w;\n return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);\n };\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor128 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorwow\" prng algorithm by\n// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var t = (me.x ^ (me.x >>> 2));\n me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v;\n return (me.d = (me.d + 362437 | 0)) +\n (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;\n };\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n me.v = 0;\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n if (k == strseed.length) {\n me.d = me.x << 10 ^ me.x >>> 4;\n }\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n t.v = f.v;\n t.d = f.d;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorwow = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorshift7\" 
algorithm by\n// Fran\u00E7ois Panneton and Pierre L'ecuyer:\n// \"On the Xorgshift Random Number Generators\"\n// http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n // Update xor generator.\n var X = me.x, i = me.i, t, v, w;\n t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24);\n t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10);\n t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3);\n t = X[(i + 4) & 7]; v ^= t ^ (t << 7);\n t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9);\n X[i] = v;\n me.i = (i + 1) & 7;\n return v;\n };\n\n function init(me, seed) {\n var j, w, X = [];\n\n if (seed === (seed | 0)) {\n // Seed state array using a 32-bit integer.\n w = X[0] = seed;\n } else {\n // Seed state using a string.\n seed = '' + seed;\n for (j = 0; j < seed.length; ++j) {\n X[j & 7] = (X[j & 7] << 15) ^\n (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);\n }\n }\n // Enforce an array length of 8, not all zeroes.\n while (X.length < 8) X.push(0);\n for (j = 0; j < 8 && X[j] === 0; ++j);\n if (j == 8) w = X[7] = -1; else w = X[j];\n\n me.x = X;\n me.i = 0;\n\n // Discard an initial 256 values.\n for (j = 256; j > 0; --j) {\n me.next();\n }\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.x = f.x.slice();\n t.i = f.i;\n return t;\n}\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.x) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorshift7 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n", "// A Javascript implementaion of Richard Brent's Xorgens xor4096 algorithm.\n//\n// This fast non-cryptographic random number generator is designed for\n// use in Monte-Carlo algorithms. It combines a long-period xorshift\n// generator with a Weyl generator, and it passes all common batteries\n// of stasticial tests for randomness while consuming only a few nanoseconds\n// for each prng generated. For background on the generator, see Brent's\n// paper: \"Some long-period random number generators using shifts and xors.\"\n// http://arxiv.org/pdf/1004.3115v1.pdf\n//\n// Usage:\n//\n// var xor4096 = require('xor4096');\n// random = xor4096(1); // Seed with int32 or string.\n// assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.\n// assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.\n//\n// For nonzero numeric keys, this impelementation provides a sequence\n// identical to that by Brent's xorgens 3 implementaion in C. 
This\n// implementation also provides for initalizing the generator with\n// string seeds, or for saving and restoring the state of the generator.\n//\n// On Chrome, this prng benchmarks about 2.1 times slower than\n// Javascript's built-in Math.random().\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n var w = me.w,\n X = me.X, i = me.i, t, v;\n // Update Weyl generator.\n me.w = w = (w + 0x61c88647) | 0;\n // Update xor generator.\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n // Update Xor generator array state.\n v = X[i] = v ^ t;\n me.i = i;\n // Result is the combination.\n return (v + (w ^ (w >>> 16))) | 0;\n };\n\n function init(me, seed) {\n var t, v, i, j, w, X = [], limit = 128;\n if (seed === (seed | 0)) {\n // Numeric seeds initialize v, which is used to generates X.\n v = seed;\n seed = null;\n } else {\n // String seeds are mixed into v and X one character at a time.\n seed = seed + '\\0';\n v = 0;\n limit = Math.max(limit, seed.length);\n }\n // Initialize circular array and weyl value.\n for (i = 0, j = -32; j < limit; ++j) {\n // Put the unicode characters into the array, and shuffle them.\n if (seed) v ^= seed.charCodeAt((j + 32) % seed.length);\n // After 32 shuffles, take v as the starting w value.\n if (j === 0) w = v;\n v ^= v << 10;\n v ^= v >>> 15;\n v ^= v << 4;\n v ^= v >>> 13;\n if (j >= 0) {\n w = (w + 0x61c88647) | 0; // Weyl.\n t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.\n i = (0 == t) ? i + 1 : 0; // Count zeroes.\n }\n }\n // We have detected all zeroes; make the key nonzero.\n if (i >= 128) {\n X[(seed && seed.length || 0) & 127] = -1;\n }\n // Run the generator 512 times to further mix the state before using it.\n // Factoring this as a function slows the main generator, so it is just\n // unrolled here. 
The weyl generator is not advanced while warming up.\n i = 127;\n for (j = 4 * 128; j > 0; --j) {\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n X[i] = v ^ t;\n }\n // Storing state as object members is faster than using closure variables.\n me.w = w;\n me.X = X;\n me.i = i;\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.i = f.i;\n t.w = f.w;\n t.X = f.X.slice();\n return t;\n};\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.X) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor4096 = impl;\n}\n\n})(\n this, // window object or global\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n", "// A Javascript implementaion of the \"Tyche-i\" prng algorithm by\n// Samuel Neves and Filipe Araujo.\n// See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var b = me.b, c = me.c, d = me.d, a = me.a;\n b = (b << 25) ^ (b >>> 7) ^ c;\n c = (c - d) | 0;\n d = (d << 24) ^ (d >>> 8) ^ a;\n a = (a - b) | 0;\n me.b = b = (b << 20) ^ (b >>> 12) ^ c;\n me.c = c = (c - d) | 0;\n me.d = (d << 16) ^ (c >>> 16) ^ a;\n return me.a = (a - b) | 0;\n };\n\n /* The following is non-inverted tyche, which has better internal\n * bit diffusion, but which is about 25% slower than tyche-i in JS.\n me.next = function() {\n var a = me.a, b = me.b, c = me.c, d = me.d;\n a = (me.a + me.b | 0) >>> 0;\n d = me.d ^ a; d = d << 16 ^ d >>> 16;\n c = me.c + d | 0;\n b = me.b ^ c; b = b << 12 ^ d >>> 20;\n me.a = a = a + b | 0;\n d = d ^ a; me.d = d = d << 8 ^ d >>> 24;\n me.c = c = c + d | 0;\n b = b ^ c;\n return me.b = (b << 7 ^ b >>> 25);\n }\n */\n\n me.a = 0;\n me.b = 0;\n me.c = 2654435769 | 0;\n me.d = 1367130551;\n\n if (seed === Math.floor(seed)) {\n // Integer seed.\n me.a = (seed / 0x100000000) | 0;\n me.b = seed | 0;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 20; k++) {\n me.b ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.a = f.a;\n t.b = f.b;\n t.c = f.c;\n t.d = f.d;\n return t;\n};\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n 
module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.tychei = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "/*\nCopyright 2019 David Bau.\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n*/\n\n(function (global, pool, math) {\n//\n// The following constants are related to IEEE 754 limits.\n//\n\nvar width = 256, // each RC4 output is 0 <= x < 256\n chunks = 6, // at least six RC4 outputs for each double\n digits = 52, // there are 52 significant digits in a double\n rngname = 'random', // rngname: name for Math.random and Math.seedrandom\n startdenom = math.pow(width, chunks),\n significance = math.pow(2, digits),\n overflow = significance * 2,\n mask = width - 1,\n nodecrypto; // node.js crypto module, initialized at the bottom.\n\n//\n// seedrandom()\n// This is the seedrandom function described above.\n//\nfunction seedrandom(seed, options, callback) {\n var key = [];\n options = (options == true) ? { entropy: true } : (options || {});\n\n // Flatten the seed string or build one from local entropy if needed.\n var shortseed = mixkey(flatten(\n options.entropy ? [seed, tostring(pool)] :\n (seed == null) ? 
autoseed() : seed, 3), key);\n\n // Use the seed to initialize an ARC4 generator.\n var arc4 = new ARC4(key);\n\n // This function returns a random double in [0, 1) that contains\n // randomness in every bit of the mantissa of the IEEE 754 value.\n var prng = function() {\n var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48\n d = startdenom, // and denominator d = 2 ^ 48.\n x = 0; // and no 'extra last byte'.\n while (n < significance) { // Fill up all significant digits by\n n = (n + x) * width; // shifting numerator and\n d *= width; // denominator and generating a\n x = arc4.g(1); // new least-significant-byte.\n }\n while (n >= overflow) { // To avoid rounding up, before adding\n n /= 2; // last byte, shift everything\n d /= 2; // right using integer math until\n x >>>= 1; // we have exactly the desired bits.\n }\n return (n + x) / d; // Form the number within [0, 1).\n };\n\n prng.int32 = function() { return arc4.g(4) | 0; }\n prng.quick = function() { return arc4.g(4) / 0x100000000; }\n prng.double = prng;\n\n // Mix the randomness into accumulated entropy.\n mixkey(tostring(arc4.S), pool);\n\n // Calling convention: what to return as a function of prng, seed, is_math.\n return (options.pass || callback ||\n function(prng, seed, is_math_call, state) {\n if (state) {\n // Load the arc4 state from the given state if it has an S array.\n if (state.S) { copy(state, arc4); }\n // Only provide the .state method if requested via options.state.\n prng.state = function() { return copy(arc4, {}); }\n }\n\n // If called as a method of Math (Math.seedrandom()), mutate\n // Math.random because that is how seedrandom.js has worked since v1.0.\n if (is_math_call) { math[rngname] = prng; return seed; }\n\n // Otherwise, it is a newer calling convention, so return the\n // prng directly.\n else return prng;\n })(\n prng,\n shortseed,\n 'global' in options ? options.global : (this == math),\n options.state);\n}\n\n//\n// ARC4\n//\n// An ARC4 implementation. The constructor takes a key in the form of\n// an array of at most (width) integers that should be 0 <= x < (width).\n//\n// The g(count) method returns a pseudorandom integer that concatenates\n// the next (count) outputs from ARC4. Its return value is a number x\n// that is in the range 0 <= x < (width ^ count).\n//\nfunction ARC4(key) {\n var t, keylen = key.length,\n me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];\n\n // The empty key [] is treated as [0].\n if (!keylen) { key = [keylen++]; }\n\n // Set up S using the standard key scheduling algorithm.\n while (i < width) {\n s[i] = i++;\n }\n for (i = 0; i < width; i++) {\n s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];\n s[j] = t;\n }\n\n // The \"g\" method returns the next (count) outputs as one number.\n (me.g = function(count) {\n // Using instance members instead of closure state nearly doubles speed.\n var t, r = 0,\n i = me.i, j = me.j, s = me.S;\n while (count--) {\n t = s[i = mask & (i + 1)];\n r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];\n }\n me.i = i; me.j = j;\n return r;\n // For robust unpredictability, the function call below automatically\n // discards an initial batch of values. 
This is called RC4-drop[256].\n // See http://google.com/search?q=rsa+fluhrer+response&btnI\n })(width);\n}\n\n//\n// copy()\n// Copies internal state of ARC4 to or from a plain object.\n//\nfunction copy(f, t) {\n t.i = f.i;\n t.j = f.j;\n t.S = f.S.slice();\n return t;\n};\n\n//\n// flatten()\n// Converts an object tree to nested arrays of strings.\n//\nfunction flatten(obj, depth) {\n var result = [], typ = (typeof obj), prop;\n if (depth && typ == 'object') {\n for (prop in obj) {\n try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}\n }\n }\n return (result.length ? result : typ == 'string' ? obj : obj + '\\0');\n}\n\n//\n// mixkey()\n// Mixes a string seed into a key that is an array of integers, and\n// returns a shortened string seed that is equivalent to the result key.\n//\nfunction mixkey(seed, key) {\n var stringseed = seed + '', smear, j = 0;\n while (j < stringseed.length) {\n key[mask & j] =\n mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));\n }\n return tostring(key);\n}\n\n//\n// autoseed()\n// Returns an object for autoseeding, using window.crypto and Node crypto\n// module if available.\n//\nfunction autoseed() {\n try {\n var out;\n if (nodecrypto && (out = nodecrypto.randomBytes)) {\n // The use of 'out' to remember randomBytes makes tight minified code.\n out = out(width);\n } else {\n out = new Uint8Array(width);\n (global.crypto || global.msCrypto).getRandomValues(out);\n }\n return tostring(out);\n } catch (e) {\n var browser = global.navigator,\n plugins = browser && browser.plugins;\n return [+new Date, global, plugins, global.screen, tostring(pool)];\n }\n}\n\n//\n// tostring()\n// Converts an array of charcodes to a string\n//\nfunction tostring(a) {\n return String.fromCharCode.apply(0, a);\n}\n\n//\n// When seedrandom.js is loaded, we immediately mix a few bits\n// from the built-in RNG into the entropy pool. Because we do\n// not want to interfere with deterministic PRNG state later,\n// seedrandom will not call math.random on its own again after\n// initialization.\n//\nmixkey(math.random(), pool);\n\n//\n// Nodejs and AMD support: export the implementation as a module using\n// either convention.\n//\nif ((typeof module) == 'object' && module.exports) {\n module.exports = seedrandom;\n // When in node.js, try using crypto package for autoseeding.\n try {\n nodecrypto = require('crypto');\n } catch (ex) {}\n} else if ((typeof define) == 'function' && define.amd) {\n define(function() { return seedrandom; });\n} else {\n // When included as a plain script, set up Math.seedrandom global.\n math['seed' + rngname] = seedrandom;\n}\n\n\n// End anonymous scope, and pass initial values.\n})(\n // global: `self` in browsers (including strict mode and web workers),\n // otherwise `this` in Node and other environments\n (typeof self !== 'undefined') ? self : this,\n [], // pool: entropy pool starts empty\n Math // math: package containing random, pow, and seedrandom\n);\n", "// A library of seedable RNGs implemented in Javascript.\n//\n// Usage:\n//\n// var seedrandom = require('seedrandom');\n// var random = seedrandom(1); // or any seed.\n// var x = random(); // 0 <= x < 1. Every bit is random.\n// var x = random.quick(); // 0 <= x < 1. 
32 bits of randomness.\n\n// alea, a 53-bit multiply-with-carry generator by Johannes Baag\u00F8e.\n// Period: ~2^116\n// Reported to pass all BigCrush tests.\nvar alea = require('./lib/alea');\n\n// xor128, a pure xor-shift generator by George Marsaglia.\n// Period: 2^128-1.\n// Reported to fail: MatrixRank and LinearComp.\nvar xor128 = require('./lib/xor128');\n\n// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.\n// Period: 2^192-2^32\n// Reported to fail: CollisionOver, SimpPoker, and LinearComp.\nvar xorwow = require('./lib/xorwow');\n\n// xorshift7, by Fran\u00E7ois Panneton and Pierre L'ecuyer, takes\n// a different approach: it adds robustness by allowing more shifts\n// than Marsaglia's original three. It is a 7-shift generator\n// with 256 bits, that passes BigCrush with no systematic failures.\n// Period 2^256-1.\n// No systematic BigCrush failures reported.\nvar xorshift7 = require('./lib/xorshift7');\n\n// xor4096, by Richard Brent, is a 4096-bit xor-shift with a\n// very long period that also adds a Weyl generator. It also passes\n// BigCrush with no systematic failures. Its long period may\n// be useful if you have many generators and need to avoid\n// collisions.\n// Period: 2^4128-2^32.\n// No systematic BigCrush failures reported.\nvar xor4096 = require('./lib/xor4096');\n\n// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random\n// number generator derived from ChaCha, a modern stream cipher.\n// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n// Period: ~2^127\n// No systematic BigCrush failures reported.\nvar tychei = require('./lib/tychei');\n\n// The original ARC4-based prng included in this library.\n// Period: ~2^1600\nvar sr = require('./seedrandom');\n\nsr.alea = alea;\nsr.xor128 = xor128;\nsr.xorwow = xorwow;\nsr.xorshift7 = xorshift7;\nsr.xor4096 = xor4096;\nsr.tychei = tychei;\n\nmodule.exports = sr;\n", "", "", "", "", "\nvar WasmBackendModuleThreadedSimd = (function() {\n var _scriptDir = typeof document !== 'undefined' && document.currentScript ? 
document.currentScript.src : undefined;\n if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;\n return (\nfunction(WasmBackendModuleThreadedSimd) {\n WasmBackendModuleThreadedSimd = WasmBackendModuleThreadedSimd || {};\n\nfunction GROWABLE_HEAP_I8(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAP8}function GROWABLE_HEAP_U8(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPU8}function GROWABLE_HEAP_I32(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAP32}function GROWABLE_HEAP_U32(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPU32}function GROWABLE_HEAP_F64(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPF64}var Module=typeof WasmBackendModuleThreadedSimd!==\"undefined\"?WasmBackendModuleThreadedSimd:{};var readyPromiseResolve,readyPromiseReject;Module[\"ready\"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram=\"./this.program\";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window===\"object\";ENVIRONMENT_IS_WORKER=typeof importScripts===\"function\";ENVIRONMENT_IS_NODE=typeof process===\"object\"&&typeof process.versions===\"object\"&&typeof process.versions.node===\"string\";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var ENVIRONMENT_IS_PTHREAD=Module[\"ENVIRONMENT_IS_PTHREAD\"]||false;if(ENVIRONMENT_IS_PTHREAD){buffer=Module[\"buffer\"]}var scriptDirectory=\"\";function locateFile(path){if(Module[\"locateFile\"]){return Module[\"locateFile\"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require(\"path\").dirname(scriptDirectory)+\"/\"}else{scriptDirectory=__dirname+\"/\"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process[\"argv\"].length>1){thisProgram=process[\"argv\"][1].replace(/\\\\/g,\"/\")}arguments_=process[\"argv\"].slice(2);process[\"on\"](\"uncaughtException\",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process[\"on\"](\"unhandledRejection\",abort);quit_=function(status){process[\"exit\"](status)};Module[\"inspect\"]=function(){return\"[Emscripten Module object]\"};var nodeWorkerThreads;try{nodeWorkerThreads=require(\"worker_threads\")}catch(e){console.error('The \"worker_threads\" module is not supported in this node.js build - perhaps a newer version is needed?');throw e}global.Worker=nodeWorkerThreads.Worker}else if(ENVIRONMENT_IS_SHELL){if(typeof read!=\"undefined\"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer===\"function\"){return new Uint8Array(readbuffer(f))}data=read(f,\"binary\");assert(typeof data===\"object\");return 
data};if(typeof scriptArgs!=\"undefined\"){arguments_=scriptArgs}else if(typeof arguments!=\"undefined\"){arguments_=arguments}if(typeof quit===\"function\"){quit_=function(status){quit(status)}}if(typeof print!==\"undefined\"){if(typeof console===\"undefined\")console={};console.log=print;console.warn=console.error=typeof printErr!==\"undefined\"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof document!==\"undefined\"&&document.currentScript){scriptDirectory=document.currentScript.src}if(typeof _scriptDir !== \"undefined\" && _scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf(\"blob:\")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf(\"/\")+1)}else{scriptDirectory=\"\"}if(ENVIRONMENT_IS_NODE){read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret}}else{read_=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.responseType=\"arraybuffer\";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,true);xhr.responseType=\"arraybuffer\";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}if(ENVIRONMENT_IS_NODE){if(typeof performance===\"undefined\"){global.performance=require(\"perf_hooks\").performance}}var out=Module[\"print\"]||console.log.bind(console);var err=Module[\"printErr\"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module[\"arguments\"])arguments_=Module[\"arguments\"];if(Module[\"thisProgram\"])thisProgram=Module[\"thisProgram\"];if(Module[\"quit\"])quit_=Module[\"quit\"];var Atomics_load=Atomics.load;var Atomics_store=Atomics.store;var Atomics_compareExchange=Atomics.compareExchange;var wasmBinary;if(Module[\"wasmBinary\"])wasmBinary=Module[\"wasmBinary\"];var noExitRuntime=Module[\"noExitRuntime\"]||true;if(typeof WebAssembly!==\"object\"){abort(\"no native wasm support detected\")}var wasmMemory;var wasmModule;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort(\"Assertion failed: \"+text)}}function getCFunc(ident){var func=Module[\"_\"+ident];assert(func,\"Cannot call unknown function \"+ident+\", make sure it is exported\");return func}function ccall(ident,returnType,argTypes,args,opts){var toC={\"string\":function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){var len=(str.length<<2)+1;ret=stackAlloc(len);stringToUTF8(str,ret,len)}return ret},\"array\":function(arr){var ret=stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}};function convertReturnValue(ret){if(returnType===\"string\")return UTF8ToString(ret);if(returnType===\"boolean\")return Boolean(ret);return ret}var func=getCFunc(ident);var cArgs=[];var stack=0;if(args){for(var i=0;i=endIdx)){var u0=heap[idx++];if(!u0)return 
str;if(!(u0&128)){str+=String.fromCharCode(u0);continue}var u1=heap[idx++]&63;if((u0&224)==192){str+=String.fromCharCode((u0&31)<<6|u1);continue}var u2=heap[idx++]&63;if((u0&240)==224){u0=(u0&15)<<12|u1<<6|u2}else{u0=(u0&7)<<18|u1<<12|u2<<6|heap[idx++]&63}if(u0<65536){str+=String.fromCharCode(u0)}else{var ch=u0-65536;str+=String.fromCharCode(55296|ch>>10,56320|ch&1023)}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(GROWABLE_HEAP_U8(),ptr,maxBytesToRead):\"\"}function stringToUTF8Array(str,heap,outIdx,maxBytesToWrite){if(!(maxBytesToWrite>0))return 0;var startIdx=outIdx;var endIdx=outIdx+maxBytesToWrite-1;for(var i=0;i=55296&&u<=57343){var u1=str.charCodeAt(++i);u=65536+((u&1023)<<10)|u1&1023}if(u<=127){if(outIdx>=endIdx)break;heap[outIdx++]=u}else if(u<=2047){if(outIdx+1>=endIdx)break;heap[outIdx++]=192|u>>6;heap[outIdx++]=128|u&63}else if(u<=65535){if(outIdx+2>=endIdx)break;heap[outIdx++]=224|u>>12;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}else{if(outIdx+3>=endIdx)break;heap[outIdx++]=240|u>>18;heap[outIdx++]=128|u>>12&63;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}}heap[outIdx]=0;return outIdx-startIdx}function stringToUTF8(str,outPtr,maxBytesToWrite){return stringToUTF8Array(str,GROWABLE_HEAP_U8(),outPtr,maxBytesToWrite)}function lengthBytesUTF8(str){var len=0;for(var i=0;i=55296&&u<=57343)u=65536+((u&1023)<<10)|str.charCodeAt(++i)&1023;if(u<=127)++len;else if(u<=2047)len+=2;else if(u<=65535)len+=3;else len+=4}return len}function writeArrayToMemory(array,buffer){GROWABLE_HEAP_I8().set(array,buffer)}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module[\"HEAP8\"]=HEAP8=new Int8Array(buf);Module[\"HEAP16\"]=HEAP16=new Int16Array(buf);Module[\"HEAP32\"]=HEAP32=new Int32Array(buf);Module[\"HEAPU8\"]=HEAPU8=new Uint8Array(buf);Module[\"HEAPU16\"]=HEAPU16=new Uint16Array(buf);Module[\"HEAPU32\"]=HEAPU32=new Uint32Array(buf);Module[\"HEAPF32\"]=HEAPF32=new Float32Array(buf);Module[\"HEAPF64\"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module[\"INITIAL_MEMORY\"]||16777216;if(ENVIRONMENT_IS_PTHREAD){wasmMemory=Module[\"wasmMemory\"];buffer=Module[\"buffer\"]}else{if(Module[\"wasmMemory\"]){wasmMemory=Module[\"wasmMemory\"]}else{wasmMemory=new WebAssembly.Memory({\"initial\":INITIAL_MEMORY/65536,\"maximum\":2147483648/65536,\"shared\":true});if(!(wasmMemory.buffer instanceof SharedArrayBuffer)){err(\"requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag\");if(ENVIRONMENT_IS_NODE){console.log(\"(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)\")}throw Error(\"bad memory\")}}}if(wasmMemory){buffer=wasmMemory.buffer}INITIAL_MEMORY=buffer.byteLength;updateGlobalBufferAndViews(buffer);var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATEXIT__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;if(!ENVIRONMENT_IS_PTHREAD)__ATINIT__.push({func:function(){___wasm_call_ctors()}});function preRun(){if(ENVIRONMENT_IS_PTHREAD)return;if(Module[\"preRun\"]){if(typeof 
Module[\"preRun\"]==\"function\")Module[\"preRun\"]=[Module[\"preRun\"]];while(Module[\"preRun\"].length){addOnPreRun(Module[\"preRun\"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;if(ENVIRONMENT_IS_PTHREAD)return;callRuntimeCallbacks(__ATINIT__)}function preMain(){if(ENVIRONMENT_IS_PTHREAD)return;callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){if(ENVIRONMENT_IS_PTHREAD)return;runtimeExited=true}function postRun(){if(ENVIRONMENT_IS_PTHREAD)return;if(Module[\"postRun\"]){if(typeof Module[\"postRun\"]==\"function\")Module[\"postRun\"]=[Module[\"postRun\"]];while(Module[\"postRun\"].length){addOnPostRun(Module[\"postRun\"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){assert(!ENVIRONMENT_IS_PTHREAD,\"addRunDependency cannot be used in a pthread worker\");runDependencies++;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module[\"preloadedImages\"]={};Module[\"preloadedAudios\"]={};function abort(what){if(Module[\"onAbort\"]){Module[\"onAbort\"](what)}if(ENVIRONMENT_IS_PTHREAD)console.error(\"Pthread aborting at \"+(new Error).stack);what+=\"\";err(what);ABORT=true;EXITSTATUS=1;what=\"abort(\"+what+\"). 
Build with -s ASSERTIONS=1 for more info.\";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix=\"data:application/octet-stream;base64,\";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix=\"file://\";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile=\"tfjs-backend-wasm-threaded-simd.wasm\";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw\"both async and sync fetching of the wasm failed\"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch===\"function\"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){if(!response[\"ok\"]){throw\"failed to load wasm binary file at '\"+wasmBinaryFile+\"'\"}return response[\"arrayBuffer\"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={\"a\":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module[\"asm\"]=exports;wasmTable=Module[\"asm\"][\"F\"];wasmModule=module;if(!ENVIRONMENT_IS_PTHREAD){var numWorkersToLoad=PThread.unusedWorkers.length;PThread.unusedWorkers.forEach(function(w){PThread.loadWasmModuleToWorker(w,function(){if(!--numWorkersToLoad)removeRunDependency(\"wasm-instantiate\")})})}}if(!ENVIRONMENT_IS_PTHREAD){addRunDependency(\"wasm-instantiate\")}function receiveInstantiatedSource(output){receiveInstance(output[\"instance\"],output[\"module\"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){return WebAssembly.instantiate(binary,info)}).then(receiver,function(reason){err(\"failed to asynchronously prepare wasm: \"+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming===\"function\"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch===\"function\"){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return result.then(receiveInstantiatedSource,function(reason){err(\"wasm streaming compile failed: \"+reason);err(\"falling back to ArrayBuffer instantiation\");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module[\"instantiateWasm\"]){try{var exports=Module[\"instantiateWasm\"](info,receiveInstance);return exports}catch(e){err(\"Module.instantiateWasm callback failed with error: \"+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}var ASM_CONSTS={10024:function(){throw\"Canceled!\"},10042:function($0,$1){setTimeout(function(){__emscripten_do_dispatch_to_thread($0,$1)},0)}};function initPthreadsJS(){PThread.initRuntime()}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback==\"function\"){callback(Module);continue}var func=callback.func;if(typeof 
func===\"number\"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}function _emscripten_futex_wake(addr,count){if(addr<=0||addr>GROWABLE_HEAP_I8().length||addr&3!=0||count<0)return-28;if(count==0)return 0;if(count>=2147483647)count=Infinity;var mainThreadWaitAddress=Atomics.load(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2);var mainThreadWoken=0;if(mainThreadWaitAddress==addr){var loadedAddr=Atomics.compareExchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,mainThreadWaitAddress,0);if(loadedAddr==mainThreadWaitAddress){--count;mainThreadWoken=1;if(count<=0)return 1}}var ret=Atomics.notify(GROWABLE_HEAP_I32(),addr>>2,count);if(ret>=0)return ret+mainThreadWoken;throw\"Atomics.notify returned an unexpected value \"+ret}Module[\"_emscripten_futex_wake\"]=_emscripten_futex_wake;function killThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! killThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! Null pthread_ptr in killThread!\";GROWABLE_HEAP_I32()[pthread_ptr+12>>2]=0;var pthread=PThread.pthreads[pthread_ptr];pthread.worker.terminate();PThread.freeThreadData(pthread);PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(pthread.worker),1);pthread.worker.pthread=undefined}function cancelThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! cancelThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! Null pthread_ptr in cancelThread!\";var pthread=PThread.pthreads[pthread_ptr];pthread.worker.postMessage({\"cmd\":\"cancel\"})}function cleanupThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! cleanupThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! 
Null pthread_ptr in cleanupThread!\";var pthread=PThread.pthreads[pthread_ptr];if(pthread){GROWABLE_HEAP_I32()[pthread_ptr+12>>2]=0;var worker=pthread.worker;PThread.returnWorkerToPool(worker)}}var PThread={unusedWorkers:[],runningWorkers:[],initMainThreadBlock:function(){var pthreadPoolSize=Math.min(4,Math.max(1,(navigator.hardwareConcurrency||1)/2));for(var i=0;i>2]=tb;var headPtr=tb+152;GROWABLE_HEAP_I32()[headPtr>>2]=headPtr;var tlsMemory=_malloc(512);for(var i=0;i<128;++i)GROWABLE_HEAP_U32()[tlsMemory/4+i]=0;Atomics.store(GROWABLE_HEAP_U32(),tb+100>>2,tlsMemory);Atomics.store(GROWABLE_HEAP_U32(),tb+40>>2,tb);__emscripten_thread_init(tb,!ENVIRONMENT_IS_WORKER,1);_emscripten_register_main_browser_thread_id(tb)},initWorker:function(){},pthreads:{},threadExitHandlers:[],setThreadStatus:function(){},runExitHandlers:function(){while(PThread.threadExitHandlers.length>0){PThread.threadExitHandlers.pop()()}if(ENVIRONMENT_IS_PTHREAD&&_pthread_self())___pthread_tsd_run_dtors()},runExitHandlersAndDeinitThread:function(tb,exitCode){Atomics.store(GROWABLE_HEAP_U32(),tb+56>>2,1);Atomics.store(GROWABLE_HEAP_U32(),tb+60>>2,0);PThread.runExitHandlers();Atomics.store(GROWABLE_HEAP_U32(),tb+4>>2,exitCode);Atomics.store(GROWABLE_HEAP_U32(),tb+0>>2,1);_emscripten_futex_wake(tb+0,2147483647);__emscripten_thread_init(0,0,0)},threadExit:function(exitCode){var tb=_pthread_self();if(tb){PThread.runExitHandlersAndDeinitThread(tb,exitCode);if(ENVIRONMENT_IS_PTHREAD){postMessage({\"cmd\":\"exit\"})}}},threadCancel:function(){PThread.runExitHandlersAndDeinitThread(_pthread_self(),-1);postMessage({\"cmd\":\"cancelDone\"})},terminateAllThreads:function(){for(var t in PThread.pthreads){var pthread=PThread.pthreads[t];if(pthread&&pthread.worker){PThread.returnWorkerToPool(pthread.worker)}}PThread.pthreads={};for(var i=0;i>2];GROWABLE_HEAP_I32()[pthread.threadInfoStruct+100>>2]=0;_free(tlsMemory);_free(pthread.threadInfoStruct)}pthread.threadInfoStruct=0;if(pthread.allocatedOwnStack&&pthread.stackBase)_free(pthread.stackBase);pthread.stackBase=0;if(pthread.worker)pthread.worker.pthread=null},returnWorkerToPool:function(worker){PThread.runWithoutMainThreadQueuedCalls(function(){delete PThread.pthreads[worker.pthread.threadInfoStruct];PThread.unusedWorkers.push(worker);PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(worker),1);PThread.freeThreadData(worker.pthread);worker.pthread=undefined})},runWithoutMainThreadQueuedCalls:function(func){GROWABLE_HEAP_I32()[__emscripten_allow_main_runtime_queued_calls>>2]=0;try{func()}finally{GROWABLE_HEAP_I32()[__emscripten_allow_main_runtime_queued_calls>>2]=1}},receiveObjectTransfer:function(data){},loadWasmModuleToWorker:function(worker,onFinishedLoading){worker.onmessage=function(e){var d=e[\"data\"];var cmd=d[\"cmd\"];if(worker.pthread)PThread.currentProxiedOperationCallerThread=worker.pthread.threadInfoStruct;if(d[\"targetThread\"]&&d[\"targetThread\"]!=_pthread_self()){var thread=PThread.pthreads[d.targetThread];if(thread){thread.worker.postMessage(e.data,d[\"transferList\"])}else{console.error('Internal error! 
Worker sent a message \"'+cmd+'\" to target pthread '+d[\"targetThread\"]+\", but that thread no longer exists!\")}PThread.currentProxiedOperationCallerThread=undefined;return}if(cmd===\"processQueuedMainThreadWork\"){_emscripten_main_thread_process_queued_calls()}else if(cmd===\"spawnThread\"){spawnThread(e.data)}else if(cmd===\"cleanupThread\"){cleanupThread(d[\"thread\"])}else if(cmd===\"killThread\"){killThread(d[\"thread\"])}else if(cmd===\"cancelThread\"){cancelThread(d[\"thread\"])}else if(cmd===\"loaded\"){worker.loaded=true;if(onFinishedLoading)onFinishedLoading(worker);if(worker.runPthread){worker.runPthread();delete worker.runPthread}}else if(cmd===\"print\"){out(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"printErr\"){err(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"alert\"){alert(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"exit\"){var detached=worker.pthread&&Atomics.load(GROWABLE_HEAP_U32(),worker.pthread.threadInfoStruct+64>>2);if(detached){PThread.returnWorkerToPool(worker)}}else if(cmd===\"exitProcess\"){try{exit(d[\"returnCode\"])}catch(e){if(e instanceof ExitStatus)return;throw e}}else if(cmd===\"cancelDone\"){PThread.returnWorkerToPool(worker)}else if(cmd===\"objectTransfer\"){PThread.receiveObjectTransfer(e.data)}else if(e.data.target===\"setimmediate\"){worker.postMessage(e.data)}else{err(\"worker sent an unknown command \"+cmd)}PThread.currentProxiedOperationCallerThread=undefined};worker.onerror=function(e){err(\"pthread sent an error! \"+e.filename+\":\"+e.lineno+\": \"+e.message)};if(ENVIRONMENT_IS_NODE){worker.on(\"message\",function(data){worker.onmessage({data:data})});worker.on(\"error\",function(data){worker.onerror(data)});worker.on(\"exit\",function(data){})}worker.postMessage({\"cmd\":\"load\",\"urlOrBlob\":Module[\"mainScriptUrlOrBlob\"]||_scriptDir,\"wasmMemory\":wasmMemory,\"wasmModule\":wasmModule})},allocateUnusedWorker:function(){var pthreadMainJs=locateFile(\"tfjs-backend-wasm-threaded-simd.worker.js\");PThread.unusedWorkers.push(new Worker(pthreadMainJs))},getNewWorker:function(){if(PThread.unusedWorkers.length==0){PThread.allocateUnusedWorker();PThread.loadWasmModuleToWorker(PThread.unusedWorkers[0])}if(PThread.unusedWorkers.length>0)return PThread.unusedWorkers.pop();else return null},busySpinWait:function(msecs){var t=performance.now()+msecs;while(performance.now()>2]=value;return value}function _atexit(func,arg){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(1,1,func,arg)}function __emscripten_notify_thread_queue(targetThreadId,mainThreadId){if(targetThreadId==mainThreadId){postMessage({\"cmd\":\"processQueuedMainThreadWork\"})}else if(ENVIRONMENT_IS_PTHREAD){postMessage({\"targetThread\":targetThreadId,\"cmd\":\"processThreadQueue\"})}else{var pthread=PThread.pthreads[targetThreadId];var worker=pthread&&pthread.worker;if(!worker){return}worker.postMessage({\"cmd\":\"processThreadQueue\"})}return 1}function _abort(){abort()}function _emscripten_asm_const_int(code,sigPtr,argbuf){var args=readAsmConstArgs(sigPtr,argbuf);return ASM_CONSTS[code].apply(null,args)}function _emscripten_conditional_set_current_thread_status(expectedStatus,newStatus){}function _emscripten_futex_wait(addr,val,timeout){if(addr<=0||addr>GROWABLE_HEAP_I8().length||addr&3!=0)return-28;if(!ENVIRONMENT_IS_WEB){var ret=Atomics.wait(GROWABLE_HEAP_I32(),addr>>2,val,timeout);if(ret===\"timed-out\")return-73;if(ret===\"not-equal\")return-6;if(ret===\"ok\")return 0;throw\"Atomics.wait returned an 
unexpected value \"+ret}else{if(Atomics.load(GROWABLE_HEAP_I32(),addr>>2)!=val){return-6}var tNow=performance.now();var tEnd=tNow+timeout;var lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,addr);while(1){tNow=performance.now();if(tNow>tEnd){lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,0);return-73}lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,0);if(lastAddr==0){break}_emscripten_main_thread_process_queued_calls();if(Atomics.load(GROWABLE_HEAP_I32(),addr>>2)!=val){return-6}lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,addr)}return 0}}function _emscripten_memcpy_big(dest,src,num){GROWABLE_HEAP_U8().copyWithin(dest,src,src+num)}function _emscripten_num_logical_cores(){if(ENVIRONMENT_IS_NODE)return require(\"os\").cpus().length;return navigator[\"hardwareConcurrency\"]}function _emscripten_proxy_to_main_thread_js(index,sync){var numCallArgs=arguments.length-2;var stack=stackSave();var serializedNumCallArgs=numCallArgs;var args=stackAlloc(serializedNumCallArgs*8);var b=args>>3;for(var i=0;i>=2;while(ch=GROWABLE_HEAP_U8()[sigPtr++]){var double=ch<105;if(double&&buf&1)buf++;readAsmConstArgsArray.push(double?GROWABLE_HEAP_F64()[buf++>>1]:GROWABLE_HEAP_I32()[buf]);++buf}return readAsmConstArgsArray}function _emscripten_receive_on_main_thread_js(index,numCallArgs,args){_emscripten_receive_on_main_thread_js_callArgs.length=numCallArgs;var b=args>>3;for(var i=0;i>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=_emscripten_get_heap_size();if(requestedSize<=oldSize){return false}var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}var JSEvents={inEventHandler:0,removeAllEventListeners:function(){for(var i=JSEvents.eventHandlers.length-1;i>=0;--i){JSEvents._removeHandler(i)}JSEvents.eventHandlers=[];JSEvents.deferredCalls=[]},registerRemoveEventListeners:function(){if(!JSEvents.removeEventListenersRegistered){__ATEXIT__.push(JSEvents.removeAllEventListeners);JSEvents.removeEventListenersRegistered=true}},deferredCalls:[],deferCall:function(targetFunction,precedence,argsList){function arraysHaveEqualContent(arrA,arrB){if(arrA.length!=arrB.length)return false;for(var i in arrA){if(arrA[i]!=arrB[i])return false}return true}for(var i in JSEvents.deferredCalls){var call=JSEvents.deferredCalls[i];if(call.targetFunction==targetFunction&&arraysHaveEqualContent(call.argsList,argsList)){return}}JSEvents.deferredCalls.push({targetFunction:targetFunction,precedence:precedence,argsList:argsList});JSEvents.deferredCalls.sort(function(x,y){return x.precedence>2]=eventTypeId;GROWABLE_HEAP_I32()[varargs+4>>2]=eventData;GROWABLE_HEAP_I32()[varargs+8>>2]=userData;__emscripten_call_on_thread(0,targetThread,637534208,eventHandlerFunc,eventData,varargs);stackRestore(stackTop)},getTargetThreadForEventCallback:function(targetThread){switch(targetThread){case 1:return 0;case 2:return PThread.currentProxiedOperationCallerThread;default:return 
targetThread}},getNodeNameForTarget:function(target){if(!target)return\"\";if(target==window)return\"#window\";if(target==screen)return\"#screen\";return target&&target.nodeName?target.nodeName:\"\"},fullscreenEnabled:function(){return document.fullscreenEnabled||document.webkitFullscreenEnabled}};function stringToNewUTF8(jsString){var length=lengthBytesUTF8(jsString)+1;var cString=_malloc(length);stringToUTF8(jsString,cString,length);return cString}function _emscripten_set_offscreencanvas_size_on_target_thread_js(targetThread,targetCanvas,width,height){var stackTop=stackSave();var varargs=stackAlloc(12);var targetCanvasPtr=0;if(targetCanvas){targetCanvasPtr=stringToNewUTF8(targetCanvas)}GROWABLE_HEAP_I32()[varargs>>2]=targetCanvasPtr;GROWABLE_HEAP_I32()[varargs+4>>2]=width;GROWABLE_HEAP_I32()[varargs+8>>2]=height;__emscripten_call_on_thread(0,targetThread,657457152,0,targetCanvasPtr,varargs);stackRestore(stackTop)}function _emscripten_set_offscreencanvas_size_on_target_thread(targetThread,targetCanvas,width,height){targetCanvas=targetCanvas?UTF8ToString(targetCanvas):\"\";_emscripten_set_offscreencanvas_size_on_target_thread_js(targetThread,targetCanvas,width,height)}function maybeCStringToJsString(cString){return cString>2?UTF8ToString(cString):cString}var specialHTMLTargets=[0,typeof document!==\"undefined\"?document:0,typeof window!==\"undefined\"?window:0];function findEventTarget(target){target=maybeCStringToJsString(target);var domElement=specialHTMLTargets[target]||(typeof document!==\"undefined\"?document.querySelector(target):undefined);return domElement}function findCanvasEventTarget(target){return findEventTarget(target)}function _emscripten_set_canvas_element_size_calling_thread(target,width,height){var canvas=findCanvasEventTarget(target);if(!canvas)return-4;if(canvas.canvasSharedPtr){GROWABLE_HEAP_I32()[canvas.canvasSharedPtr>>2]=width;GROWABLE_HEAP_I32()[canvas.canvasSharedPtr+4>>2]=height}if(canvas.offscreenCanvas||!canvas.controlTransferredOffscreen){if(canvas.offscreenCanvas)canvas=canvas.offscreenCanvas;var autoResizeViewport=false;if(canvas.GLctxObject&&canvas.GLctxObject.GLctx){var prevViewport=canvas.GLctxObject.GLctx.getParameter(2978);autoResizeViewport=prevViewport[0]===0&&prevViewport[1]===0&&prevViewport[2]===canvas.width&&prevViewport[3]===canvas.height}canvas.width=width;canvas.height=height;if(autoResizeViewport){canvas.GLctxObject.GLctx.viewport(0,0,width,height)}}else if(canvas.canvasSharedPtr){var targetThread=GROWABLE_HEAP_I32()[canvas.canvasSharedPtr+8>>2];_emscripten_set_offscreencanvas_size_on_target_thread(targetThread,target,width,height);return 1}else{return-4}return 0}function _emscripten_set_canvas_element_size_main_thread(target,width,height){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(2,1,target,width,height);return _emscripten_set_canvas_element_size_calling_thread(target,width,height)}function _emscripten_set_canvas_element_size(target,width,height){var canvas=findCanvasEventTarget(target);if(canvas){return _emscripten_set_canvas_element_size_calling_thread(target,width,height)}else{return _emscripten_set_canvas_element_size_main_thread(target,width,height)}}function _emscripten_set_current_thread_status(newStatus){}function _emscripten_set_thread_name(threadId,name){}function __webgl_enable_ANGLE_instanced_arrays(ctx){var 
ext=ctx.getExtension(\"ANGLE_instanced_arrays\");if(ext){ctx[\"vertexAttribDivisor\"]=function(index,divisor){ext[\"vertexAttribDivisorANGLE\"](index,divisor)};ctx[\"drawArraysInstanced\"]=function(mode,first,count,primcount){ext[\"drawArraysInstancedANGLE\"](mode,first,count,primcount)};ctx[\"drawElementsInstanced\"]=function(mode,count,type,indices,primcount){ext[\"drawElementsInstancedANGLE\"](mode,count,type,indices,primcount)};return 1}}function __webgl_enable_OES_vertex_array_object(ctx){var ext=ctx.getExtension(\"OES_vertex_array_object\");if(ext){ctx[\"createVertexArray\"]=function(){return ext[\"createVertexArrayOES\"]()};ctx[\"deleteVertexArray\"]=function(vao){ext[\"deleteVertexArrayOES\"](vao)};ctx[\"bindVertexArray\"]=function(vao){ext[\"bindVertexArrayOES\"](vao)};ctx[\"isVertexArray\"]=function(vao){return ext[\"isVertexArrayOES\"](vao)};return 1}}function __webgl_enable_WEBGL_draw_buffers(ctx){var ext=ctx.getExtension(\"WEBGL_draw_buffers\");if(ext){ctx[\"drawBuffers\"]=function(n,bufs){ext[\"drawBuffersWEBGL\"](n,bufs)};return 1}}function __webgl_enable_WEBGL_multi_draw(ctx){return!!(ctx.multiDrawWebgl=ctx.getExtension(\"WEBGL_multi_draw\"))}var GL={counter:1,buffers:[],programs:[],framebuffers:[],renderbuffers:[],textures:[],uniforms:[],shaders:[],vaos:[],contexts:{},offscreenCanvases:{},timerQueriesEXT:[],programInfos:{},stringCache:{},unpackAlignment:4,recordError:function recordError(errorCode){if(!GL.lastError){GL.lastError=errorCode}},getNewId:function(table){var ret=GL.counter++;for(var i=table.length;i>2]:-1;source+=UTF8ToString(GROWABLE_HEAP_I32()[string+i*4>>2],len<0?undefined:len)}return source},createContext:function(canvas,webGLContextAttributes){var ctx=canvas.getContext(\"webgl\",webGLContextAttributes);if(!ctx)return 0;var handle=GL.registerContext(ctx,webGLContextAttributes);return handle},registerContext:function(ctx,webGLContextAttributes){var handle=_malloc(8);GROWABLE_HEAP_I32()[handle+4>>2]=_pthread_self();var context={handle:handle,attributes:webGLContextAttributes,version:webGLContextAttributes.majorVersion,GLctx:ctx};if(ctx.canvas)ctx.canvas.GLctxObject=context;GL.contexts[handle]=context;if(typeof webGLContextAttributes.enableExtensionsByDefault===\"undefined\"||webGLContextAttributes.enableExtensionsByDefault){GL.initExtensions(context)}return handle},makeContextCurrent:function(contextHandle){GL.currentContext=GL.contexts[contextHandle];Module.ctx=GLctx=GL.currentContext&&GL.currentContext.GLctx;return!(contextHandle&&!GLctx)},getContext:function(contextHandle){return GL.contexts[contextHandle]},deleteContext:function(contextHandle){if(GL.currentContext===GL.contexts[contextHandle])GL.currentContext=null;if(typeof JSEvents===\"object\")JSEvents.removeAllHandlersOnTarget(GL.contexts[contextHandle].GLctx.canvas);if(GL.contexts[contextHandle]&&GL.contexts[contextHandle].GLctx.canvas)GL.contexts[contextHandle].GLctx.canvas.GLctxObject=undefined;_free(GL.contexts[contextHandle].handle);GL.contexts[contextHandle]=null},initExtensions:function(context){if(!context)context=GL.currentContext;if(context.initExtensionsDone)return;context.initExtensionsDone=true;var GLctx=context.GLctx;__webgl_enable_ANGLE_instanced_arrays(GLctx);__webgl_enable_OES_vertex_array_object(GLctx);__webgl_enable_WEBGL_draw_buffers(GLctx);GLctx.disjointTimerQueryExt=GLctx.getExtension(\"EXT_disjoint_timer_query\");__webgl_enable_WEBGL_multi_draw(GLctx);var 
exts=GLctx.getSupportedExtensions()||[];exts.forEach(function(ext){if(ext.indexOf(\"lose_context\")<0&&ext.indexOf(\"debug\")<0){GLctx.getExtension(ext)}})},populateUniformTable:function(program){var p=GL.programs[program];var ptable=GL.programInfos[program]={uniforms:{},maxUniformLength:0,maxAttributeLength:-1,maxUniformBlockNameLength:-1};var utable=ptable.uniforms;var numUniforms=GLctx.getProgramParameter(p,35718);for(var i=0;i>2;var powerPreference=GROWABLE_HEAP_I32()[a+(24>>2)];var contextAttributes={\"alpha\":!!GROWABLE_HEAP_I32()[a+(0>>2)],\"depth\":!!GROWABLE_HEAP_I32()[a+(4>>2)],\"stencil\":!!GROWABLE_HEAP_I32()[a+(8>>2)],\"antialias\":!!GROWABLE_HEAP_I32()[a+(12>>2)],\"premultipliedAlpha\":!!GROWABLE_HEAP_I32()[a+(16>>2)],\"preserveDrawingBuffer\":!!GROWABLE_HEAP_I32()[a+(20>>2)],\"powerPreference\":__emscripten_webgl_power_preferences[powerPreference],\"failIfMajorPerformanceCaveat\":!!GROWABLE_HEAP_I32()[a+(28>>2)],majorVersion:GROWABLE_HEAP_I32()[a+(32>>2)],minorVersion:GROWABLE_HEAP_I32()[a+(36>>2)],enableExtensionsByDefault:GROWABLE_HEAP_I32()[a+(40>>2)],explicitSwapControl:GROWABLE_HEAP_I32()[a+(44>>2)],proxyContextToMainThread:GROWABLE_HEAP_I32()[a+(48>>2)],renderViaOffscreenBackBuffer:GROWABLE_HEAP_I32()[a+(52>>2)]};var canvas=findCanvasEventTarget(target);if(!canvas){return 0}if(contextAttributes.explicitSwapControl){return 0}var contextHandle=GL.createContext(canvas,contextAttributes);return contextHandle}function _emscripten_webgl_create_context(a0,a1){return _emscripten_webgl_do_create_context(a0,a1)}var SYSCALLS={mappings:{},buffers:[null,[],[]],printChar:function(stream,curr){var buffer=SYSCALLS.buffers[stream];if(curr===0||curr===10){(stream===1?out:err)(UTF8ArrayToString(buffer,0));buffer.length=0}else{buffer.push(curr)}},varargs:undefined,get:function(){SYSCALLS.varargs+=4;var ret=GROWABLE_HEAP_I32()[SYSCALLS.varargs-4>>2];return ret},getStr:function(ptr){var ret=UTF8ToString(ptr);return ret},get64:function(low,high){return low}};function _fd_close(fd){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(3,1,fd);return 0}function _fd_seek(fd,offset_low,offset_high,whence,newOffset){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(4,1,fd,offset_low,offset_high,whence,newOffset)}function _fd_write(fd,iov,iovcnt,pnum){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(5,1,fd,iov,iovcnt,pnum);var num=0;for(var i=0;i>2];var len=GROWABLE_HEAP_I32()[iov+(i*8+4)>>2];for(var j=0;j>2]=num;return 0}function _pthread_cleanup_pop(execute){var routine=PThread.threadExitHandlers.pop();if(execute)routine()}function _pthread_cleanup_push(routine,arg){PThread.threadExitHandlers.push(function(){wasmTable.get(routine)(arg)})}function spawnThread(threadParams){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! 
spawnThread() can only ever be called from main application thread!\";var worker=PThread.getNewWorker();if(worker.pthread!==undefined)throw\"Internal error!\";if(!threadParams.pthread_ptr)throw\"Internal error, no pthread ptr!\";PThread.runningWorkers.push(worker);var tlsMemory=_malloc(128*4);for(var i=0;i<128;++i){GROWABLE_HEAP_I32()[tlsMemory+i*4>>2]=0}var stackHigh=threadParams.stackBase+threadParams.stackSize;var pthread=PThread.pthreads[threadParams.pthread_ptr]={worker:worker,stackBase:threadParams.stackBase,stackSize:threadParams.stackSize,allocatedOwnStack:threadParams.allocatedOwnStack,threadInfoStruct:threadParams.pthread_ptr};var tis=pthread.threadInfoStruct>>2;Atomics.store(GROWABLE_HEAP_U32(),tis+(64>>2),threadParams.detached);Atomics.store(GROWABLE_HEAP_U32(),tis+(100>>2),tlsMemory);Atomics.store(GROWABLE_HEAP_U32(),tis+(40>>2),pthread.threadInfoStruct);Atomics.store(GROWABLE_HEAP_U32(),tis+(80>>2),threadParams.stackSize);Atomics.store(GROWABLE_HEAP_U32(),tis+(76>>2),stackHigh);Atomics.store(GROWABLE_HEAP_U32(),tis+(104>>2),threadParams.stackSize);Atomics.store(GROWABLE_HEAP_U32(),tis+(104+8>>2),stackHigh);Atomics.store(GROWABLE_HEAP_U32(),tis+(104+12>>2),threadParams.detached);var global_libc=_emscripten_get_global_libc();var global_locale=global_libc+40;Atomics.store(GROWABLE_HEAP_U32(),tis+(172>>2),global_locale);worker.pthread=pthread;var msg={\"cmd\":\"run\",\"start_routine\":threadParams.startRoutine,\"arg\":threadParams.arg,\"threadInfoStruct\":threadParams.pthread_ptr,\"stackBase\":threadParams.stackBase,\"stackSize\":threadParams.stackSize};worker.runPthread=function(){msg.time=performance.now();worker.postMessage(msg,threadParams.transferList)};if(worker.loaded){worker.runPthread();delete worker.runPthread}}function _pthread_create(pthread_ptr,attr,start_routine,arg){if(typeof SharedArrayBuffer===\"undefined\"){err(\"Current environment does not support SharedArrayBuffer, pthreads are not available!\");return 6}if(!pthread_ptr){err(\"pthread_create called with a null thread pointer!\");return 28}var transferList=[];var error=0;if(ENVIRONMENT_IS_PTHREAD&&(transferList.length===0||error)){return _emscripten_sync_run_in_main_thread_4(687865856,pthread_ptr,attr,start_routine,arg)}if(error)return error;var stackSize=0;var stackBase=0;var detached=0;if(attr&&attr!=-1){stackSize=GROWABLE_HEAP_I32()[attr>>2];stackSize+=81920;stackBase=GROWABLE_HEAP_I32()[attr+8>>2];detached=GROWABLE_HEAP_I32()[attr+12>>2]!==0}else{stackSize=2097152}var allocatedOwnStack=stackBase==0;if(allocatedOwnStack){stackBase=_memalign(16,stackSize)}else{stackBase-=stackSize;assert(stackBase>0)}var threadInfoStruct=_malloc(228);for(var i=0;i<228>>2;++i)GROWABLE_HEAP_U32()[(threadInfoStruct>>2)+i]=0;GROWABLE_HEAP_I32()[pthread_ptr>>2]=threadInfoStruct;GROWABLE_HEAP_I32()[threadInfoStruct+12>>2]=threadInfoStruct;var headPtr=threadInfoStruct+152;GROWABLE_HEAP_I32()[headPtr>>2]=headPtr;var threadParams={stackBase:stackBase,stackSize:stackSize,allocatedOwnStack:allocatedOwnStack,detached:detached,startRoutine:start_routine,pthread_ptr:threadInfoStruct,arg:arg,transferList:transferList};if(ENVIRONMENT_IS_PTHREAD){threadParams.cmd=\"spawnThread\";postMessage(threadParams,transferList)}else{spawnThread(threadParams)}return 0}function _sysconf(name){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(6,1,name);switch(name){case 30:return 16384;case 85:var maxHeapSize=2147483648;return maxHeapSize/16384;case 132:case 133:case 12:case 137:case 138:case 15:case 235:case 16:case 17:case 18:case 
19:case 20:case 149:case 13:case 10:case 236:case 153:case 9:case 21:case 22:case 159:case 154:case 14:case 77:case 78:case 139:case 82:case 68:case 67:case 164:case 11:case 29:case 47:case 48:case 95:case 52:case 51:case 46:return 200809;case 27:case 246:case 127:case 128:case 23:case 24:case 160:case 161:case 181:case 182:case 242:case 183:case 184:case 243:case 244:case 245:case 165:case 178:case 179:case 49:case 50:case 168:case 169:case 175:case 170:case 171:case 172:case 97:case 76:case 32:case 173:case 35:case 80:case 81:case 79:return-1;case 176:case 177:case 7:case 155:case 8:case 157:case 125:case 126:case 92:case 93:case 129:case 130:case 131:case 94:case 91:return 1;case 74:case 60:case 69:case 70:case 4:return 1024;case 31:case 42:case 72:return 32;case 87:case 26:case 33:return 2147483647;case 34:case 1:return 47839;case 38:case 36:return 99;case 43:case 37:return 2048;case 0:return 2097152;case 3:return 65536;case 28:return 32768;case 44:return 32767;case 75:return 16384;case 39:return 1e3;case 89:return 700;case 71:return 256;case 40:return 255;case 2:return 100;case 180:return 64;case 25:return 20;case 5:return 16;case 6:return 6;case 73:return 4;case 84:{if(typeof navigator===\"object\")return navigator[\"hardwareConcurrency\"]||1;return 1}}setErrNo(28);return-1}if(!ENVIRONMENT_IS_PTHREAD)PThread.initMainThreadBlock();var GLctx;var proxiedFunctionTable=[null,_atexit,_emscripten_set_canvas_element_size_main_thread,_fd_close,_fd_seek,_fd_write,_sysconf];var asmLibraryArg={\"e\":___assert_fail,\"r\":___call_main,\"x\":__emscripten_notify_thread_queue,\"b\":_abort,\"y\":_emscripten_asm_const_int,\"j\":_emscripten_conditional_set_current_thread_status,\"c\":_emscripten_futex_wait,\"d\":_emscripten_futex_wake,\"f\":_emscripten_get_now,\"p\":_emscripten_memcpy_big,\"z\":_emscripten_num_logical_cores,\"u\":_emscripten_receive_on_main_thread_js,\"q\":_emscripten_resize_heap,\"v\":_emscripten_set_canvas_element_size,\"i\":_emscripten_set_current_thread_status,\"t\":_emscripten_set_thread_name,\"w\":_emscripten_webgl_create_context,\"m\":_fd_close,\"n\":_fd_seek,\"g\":_fd_write,\"o\":initPthreadsJS,\"a\":wasmMemory||Module[\"wasmMemory\"],\"k\":_pthread_cleanup_pop,\"l\":_pthread_cleanup_push,\"h\":_pthread_create,\"s\":_sysconf};var asm=createWasm();var ___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=function(){return(___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=Module[\"asm\"][\"A\"]).apply(null,arguments)};var _init=Module[\"_init\"]=function(){return(_init=Module[\"_init\"]=Module[\"asm\"][\"B\"]).apply(null,arguments)};var _register_tensor=Module[\"_register_tensor\"]=function(){return(_register_tensor=Module[\"_register_tensor\"]=Module[\"asm\"][\"C\"]).apply(null,arguments)};var _dispose_data=Module[\"_dispose_data\"]=function(){return(_dispose_data=Module[\"_dispose_data\"]=Module[\"asm\"][\"D\"]).apply(null,arguments)};var _dispose=Module[\"_dispose\"]=function(){return(_dispose=Module[\"_dispose\"]=Module[\"asm\"][\"E\"]).apply(null,arguments)};var _Abs=Module[\"_Abs\"]=function(){return(_Abs=Module[\"_Abs\"]=Module[\"asm\"][\"G\"]).apply(null,arguments)};var _Add=Module[\"_Add\"]=function(){return(_Add=Module[\"_Add\"]=Module[\"asm\"][\"H\"]).apply(null,arguments)};var _AddN=Module[\"_AddN\"]=function(){return(_AddN=Module[\"_AddN\"]=Module[\"asm\"][\"I\"]).apply(null,arguments)};var _All=Module[\"_All\"]=function(){return(_All=Module[\"_All\"]=Module[\"asm\"][\"J\"]).apply(null,arguments)};var 
_Any=Module[\"_Any\"]=function(){return(_Any=Module[\"_Any\"]=Module[\"asm\"][\"K\"]).apply(null,arguments)};var _ArgMax=Module[\"_ArgMax\"]=function(){return(_ArgMax=Module[\"_ArgMax\"]=Module[\"asm\"][\"L\"]).apply(null,arguments)};var _AvgPool=Module[\"_AvgPool\"]=function(){return(_AvgPool=Module[\"_AvgPool\"]=Module[\"asm\"][\"M\"]).apply(null,arguments)};var _BatchMatMul=Module[\"_BatchMatMul\"]=function(){return(_BatchMatMul=Module[\"_BatchMatMul\"]=Module[\"asm\"][\"N\"]).apply(null,arguments)};var _Ceil=Module[\"_Ceil\"]=function(){return(_Ceil=Module[\"_Ceil\"]=Module[\"asm\"][\"O\"]).apply(null,arguments)};var _ClipByValue=Module[\"_ClipByValue\"]=function(){return(_ClipByValue=Module[\"_ClipByValue\"]=Module[\"asm\"][\"P\"]).apply(null,arguments)};var _Conv2D=Module[\"_Conv2D\"]=function(){return(_Conv2D=Module[\"_Conv2D\"]=Module[\"asm\"][\"Q\"]).apply(null,arguments)};var _Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=function(){return(_Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=Module[\"asm\"][\"R\"]).apply(null,arguments)};var _Cos=Module[\"_Cos\"]=function(){return(_Cos=Module[\"_Cos\"]=Module[\"asm\"][\"S\"]).apply(null,arguments)};var _Cosh=Module[\"_Cosh\"]=function(){return(_Cosh=Module[\"_Cosh\"]=Module[\"asm\"][\"T\"]).apply(null,arguments)};var _CropAndResize=Module[\"_CropAndResize\"]=function(){return(_CropAndResize=Module[\"_CropAndResize\"]=Module[\"asm\"][\"U\"]).apply(null,arguments)};var _Cumsum=Module[\"_Cumsum\"]=function(){return(_Cumsum=Module[\"_Cumsum\"]=Module[\"asm\"][\"V\"]).apply(null,arguments)};var _DepthToSpace=Module[\"_DepthToSpace\"]=function(){return(_DepthToSpace=Module[\"_DepthToSpace\"]=Module[\"asm\"][\"W\"]).apply(null,arguments)};var _DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=function(){return(_DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=Module[\"asm\"][\"X\"]).apply(null,arguments)};var _Elu=Module[\"_Elu\"]=function(){return(_Elu=Module[\"_Elu\"]=Module[\"asm\"][\"Y\"]).apply(null,arguments)};var _Equal=Module[\"_Equal\"]=function(){return(_Equal=Module[\"_Equal\"]=Module[\"asm\"][\"Z\"]).apply(null,arguments)};var _Exp=Module[\"_Exp\"]=function(){return(_Exp=Module[\"_Exp\"]=Module[\"asm\"][\"_\"]).apply(null,arguments)};var _FlipLeftRight=Module[\"_FlipLeftRight\"]=function(){return(_FlipLeftRight=Module[\"_FlipLeftRight\"]=Module[\"asm\"][\"$\"]).apply(null,arguments)};var _Floor=Module[\"_Floor\"]=function(){return(_Floor=Module[\"_Floor\"]=Module[\"asm\"][\"aa\"]).apply(null,arguments)};var _FloorDiv=Module[\"_FloorDiv\"]=function(){return(_FloorDiv=Module[\"_FloorDiv\"]=Module[\"asm\"][\"ba\"]).apply(null,arguments)};var _FusedBatchNorm=Module[\"_FusedBatchNorm\"]=function(){return(_FusedBatchNorm=Module[\"_FusedBatchNorm\"]=Module[\"asm\"][\"ca\"]).apply(null,arguments)};var _FusedConv2D=Module[\"_FusedConv2D\"]=function(){return(_FusedConv2D=Module[\"_FusedConv2D\"]=Module[\"asm\"][\"da\"]).apply(null,arguments)};var _FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=function(){return(_FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=Module[\"asm\"][\"ea\"]).apply(null,arguments)};var _Gather=Module[\"_Gather\"]=function(){return(_Gather=Module[\"_Gather\"]=Module[\"asm\"][\"fa\"]).apply(null,arguments)};var _GatherNd=Module[\"_GatherNd\"]=function(){return(_GatherNd=Module[\"_GatherNd\"]=Module[\"asm\"][\"ga\"]).apply(null,arguments)};var _Greater=Module[\"_Greater\"]=function(){return(_Greater=Module[\"_Greater\"]=Module[\"asm\"][\"ha\"]).apply(null,arguments)};var 
_GreaterEqual=Module[\"_GreaterEqual\"]=function(){return(_GreaterEqual=Module[\"_GreaterEqual\"]=Module[\"asm\"][\"ia\"]).apply(null,arguments)};var _LeakyRelu=Module[\"_LeakyRelu\"]=function(){return(_LeakyRelu=Module[\"_LeakyRelu\"]=Module[\"asm\"][\"ja\"]).apply(null,arguments)};var _Less=Module[\"_Less\"]=function(){return(_Less=Module[\"_Less\"]=Module[\"asm\"][\"ka\"]).apply(null,arguments)};var _LessEqual=Module[\"_LessEqual\"]=function(){return(_LessEqual=Module[\"_LessEqual\"]=Module[\"asm\"][\"la\"]).apply(null,arguments)};var _Log=Module[\"_Log\"]=function(){return(_Log=Module[\"_Log\"]=Module[\"asm\"][\"ma\"]).apply(null,arguments)};var _LogicalAnd=Module[\"_LogicalAnd\"]=function(){return(_LogicalAnd=Module[\"_LogicalAnd\"]=Module[\"asm\"][\"na\"]).apply(null,arguments)};var _Max=Module[\"_Max\"]=function(){return(_Max=Module[\"_Max\"]=Module[\"asm\"][\"oa\"]).apply(null,arguments)};var _MaxPool=Module[\"_MaxPool\"]=function(){return(_MaxPool=Module[\"_MaxPool\"]=Module[\"asm\"][\"pa\"]).apply(null,arguments)};var _Maximum=Module[\"_Maximum\"]=function(){return(_Maximum=Module[\"_Maximum\"]=Module[\"asm\"][\"qa\"]).apply(null,arguments)};var _Mean=Module[\"_Mean\"]=function(){return(_Mean=Module[\"_Mean\"]=Module[\"asm\"][\"ra\"]).apply(null,arguments)};var _Min=Module[\"_Min\"]=function(){return(_Min=Module[\"_Min\"]=Module[\"asm\"][\"sa\"]).apply(null,arguments)};var _Minimum=Module[\"_Minimum\"]=function(){return(_Minimum=Module[\"_Minimum\"]=Module[\"asm\"][\"ta\"]).apply(null,arguments)};var _MirrorPad=Module[\"_MirrorPad\"]=function(){return(_MirrorPad=Module[\"_MirrorPad\"]=Module[\"asm\"][\"ua\"]).apply(null,arguments)};var _Multiply=Module[\"_Multiply\"]=function(){return(_Multiply=Module[\"_Multiply\"]=Module[\"asm\"][\"va\"]).apply(null,arguments)};var _Neg=Module[\"_Neg\"]=function(){return(_Neg=Module[\"_Neg\"]=Module[\"asm\"][\"wa\"]).apply(null,arguments)};var _NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=function(){return(_NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=Module[\"asm\"][\"xa\"]).apply(null,arguments)};var _NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=function(){return(_NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=Module[\"asm\"][\"ya\"]).apply(null,arguments)};var _NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=function(){return(_NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=Module[\"asm\"][\"za\"]).apply(null,arguments)};var _NotEqual=Module[\"_NotEqual\"]=function(){return(_NotEqual=Module[\"_NotEqual\"]=Module[\"asm\"][\"Aa\"]).apply(null,arguments)};var _OneHot=Module[\"_OneHot\"]=function(){return(_OneHot=Module[\"_OneHot\"]=Module[\"asm\"][\"Ba\"]).apply(null,arguments)};var _PadV2=Module[\"_PadV2\"]=function(){return(_PadV2=Module[\"_PadV2\"]=Module[\"asm\"][\"Ca\"]).apply(null,arguments)};var _Pow=Module[\"_Pow\"]=function(){return(_Pow=Module[\"_Pow\"]=Module[\"asm\"][\"Da\"]).apply(null,arguments)};var _Prelu=Module[\"_Prelu\"]=function(){return(_Prelu=Module[\"_Prelu\"]=Module[\"asm\"][\"Ea\"]).apply(null,arguments)};var _Prod=Module[\"_Prod\"]=function(){return(_Prod=Module[\"_Prod\"]=Module[\"asm\"][\"Fa\"]).apply(null,arguments)};var _RealDiv=Module[\"_RealDiv\"]=function(){return(_RealDiv=Module[\"_RealDiv\"]=Module[\"asm\"][\"Ga\"]).apply(null,arguments)};var _Relu=Module[\"_Relu\"]=function(){return(_Relu=Module[\"_Relu\"]=Module[\"asm\"][\"Ha\"]).apply(null,arguments)};var 
_Relu6=Module[\"_Relu6\"]=function(){return(_Relu6=Module[\"_Relu6\"]=Module[\"asm\"][\"Ia\"]).apply(null,arguments)};var _ResizeBilinear=Module[\"_ResizeBilinear\"]=function(){return(_ResizeBilinear=Module[\"_ResizeBilinear\"]=Module[\"asm\"][\"Ja\"]).apply(null,arguments)};var _Reverse=Module[\"_Reverse\"]=function(){return(_Reverse=Module[\"_Reverse\"]=Module[\"asm\"][\"Ka\"]).apply(null,arguments)};var _RotateWithOffset=Module[\"_RotateWithOffset\"]=function(){return(_RotateWithOffset=Module[\"_RotateWithOffset\"]=Module[\"asm\"][\"La\"]).apply(null,arguments)};var _Round=Module[\"_Round\"]=function(){return(_Round=Module[\"_Round\"]=Module[\"asm\"][\"Ma\"]).apply(null,arguments)};var _Rsqrt=Module[\"_Rsqrt\"]=function(){return(_Rsqrt=Module[\"_Rsqrt\"]=Module[\"asm\"][\"Na\"]).apply(null,arguments)};var _ScatterNd=Module[\"_ScatterNd\"]=function(){return(_ScatterNd=Module[\"_ScatterNd\"]=Module[\"asm\"][\"Oa\"]).apply(null,arguments)};var _SelectV2=Module[\"_SelectV2\"]=function(){return(_SelectV2=Module[\"_SelectV2\"]=Module[\"asm\"][\"Pa\"]).apply(null,arguments)};var _Sigmoid=Module[\"_Sigmoid\"]=function(){return(_Sigmoid=Module[\"_Sigmoid\"]=Module[\"asm\"][\"Qa\"]).apply(null,arguments)};var _Sin=Module[\"_Sin\"]=function(){return(_Sin=Module[\"_Sin\"]=Module[\"asm\"][\"Ra\"]).apply(null,arguments)};var _Softmax=Module[\"_Softmax\"]=function(){return(_Softmax=Module[\"_Softmax\"]=Module[\"asm\"][\"Sa\"]).apply(null,arguments)};var _Sqrt=Module[\"_Sqrt\"]=function(){return(_Sqrt=Module[\"_Sqrt\"]=Module[\"asm\"][\"Ta\"]).apply(null,arguments)};var _Square=Module[\"_Square\"]=function(){return(_Square=Module[\"_Square\"]=Module[\"asm\"][\"Ua\"]).apply(null,arguments)};var _SquaredDifference=Module[\"_SquaredDifference\"]=function(){return(_SquaredDifference=Module[\"_SquaredDifference\"]=Module[\"asm\"][\"Va\"]).apply(null,arguments)};var _Step=Module[\"_Step\"]=function(){return(_Step=Module[\"_Step\"]=Module[\"asm\"][\"Wa\"]).apply(null,arguments)};var _StridedSlice=Module[\"_StridedSlice\"]=function(){return(_StridedSlice=Module[\"_StridedSlice\"]=Module[\"asm\"][\"Xa\"]).apply(null,arguments)};var _Sub=Module[\"_Sub\"]=function(){return(_Sub=Module[\"_Sub\"]=Module[\"asm\"][\"Ya\"]).apply(null,arguments)};var _Sum=Module[\"_Sum\"]=function(){return(_Sum=Module[\"_Sum\"]=Module[\"asm\"][\"Za\"]).apply(null,arguments)};var _Tan=Module[\"_Tan\"]=function(){return(_Tan=Module[\"_Tan\"]=Module[\"asm\"][\"_a\"]).apply(null,arguments)};var _Tanh=Module[\"_Tanh\"]=function(){return(_Tanh=Module[\"_Tanh\"]=Module[\"asm\"][\"$a\"]).apply(null,arguments)};var _Tile=Module[\"_Tile\"]=function(){return(_Tile=Module[\"_Tile\"]=Module[\"asm\"][\"ab\"]).apply(null,arguments)};var _TopK=Module[\"_TopK\"]=function(){return(_TopK=Module[\"_TopK\"]=Module[\"asm\"][\"bb\"]).apply(null,arguments)};var _Transform=Module[\"_Transform\"]=function(){return(_Transform=Module[\"_Transform\"]=Module[\"asm\"][\"cb\"]).apply(null,arguments)};var _Transpose=Module[\"_Transpose\"]=function(){return(_Transpose=Module[\"_Transpose\"]=Module[\"asm\"][\"db\"]).apply(null,arguments)};var __FusedMatMul=Module[\"__FusedMatMul\"]=function(){return(__FusedMatMul=Module[\"__FusedMatMul\"]=Module[\"asm\"][\"eb\"]).apply(null,arguments)};var _malloc=Module[\"_malloc\"]=function(){return(_malloc=Module[\"_malloc\"]=Module[\"asm\"][\"fb\"]).apply(null,arguments)};var _free=Module[\"_free\"]=function(){return(_free=Module[\"_free\"]=Module[\"asm\"][\"gb\"]).apply(null,arguments)};var 
___errno_location=Module[\"___errno_location\"]=function(){return(___errno_location=Module[\"___errno_location\"]=Module[\"asm\"][\"hb\"]).apply(null,arguments)};var _emscripten_get_global_libc=Module[\"_emscripten_get_global_libc\"]=function(){return(_emscripten_get_global_libc=Module[\"_emscripten_get_global_libc\"]=Module[\"asm\"][\"ib\"]).apply(null,arguments)};var _pthread_self=Module[\"_pthread_self\"]=function(){return(_pthread_self=Module[\"_pthread_self\"]=Module[\"asm\"][\"jb\"]).apply(null,arguments)};var ___pthread_tsd_run_dtors=Module[\"___pthread_tsd_run_dtors\"]=function(){return(___pthread_tsd_run_dtors=Module[\"___pthread_tsd_run_dtors\"]=Module[\"asm\"][\"kb\"]).apply(null,arguments)};var _emscripten_main_thread_process_queued_calls=Module[\"_emscripten_main_thread_process_queued_calls\"]=function(){return(_emscripten_main_thread_process_queued_calls=Module[\"_emscripten_main_thread_process_queued_calls\"]=Module[\"asm\"][\"lb\"]).apply(null,arguments)};var _emscripten_current_thread_process_queued_calls=Module[\"_emscripten_current_thread_process_queued_calls\"]=function(){return(_emscripten_current_thread_process_queued_calls=Module[\"_emscripten_current_thread_process_queued_calls\"]=Module[\"asm\"][\"mb\"]).apply(null,arguments)};var _emscripten_register_main_browser_thread_id=Module[\"_emscripten_register_main_browser_thread_id\"]=function(){return(_emscripten_register_main_browser_thread_id=Module[\"_emscripten_register_main_browser_thread_id\"]=Module[\"asm\"][\"nb\"]).apply(null,arguments)};var __emscripten_do_dispatch_to_thread=Module[\"__emscripten_do_dispatch_to_thread\"]=function(){return(__emscripten_do_dispatch_to_thread=Module[\"__emscripten_do_dispatch_to_thread\"]=Module[\"asm\"][\"ob\"]).apply(null,arguments)};var _emscripten_sync_run_in_main_thread_4=Module[\"_emscripten_sync_run_in_main_thread_4\"]=function(){return(_emscripten_sync_run_in_main_thread_4=Module[\"_emscripten_sync_run_in_main_thread_4\"]=Module[\"asm\"][\"pb\"]).apply(null,arguments)};var _emscripten_run_in_main_runtime_thread_js=Module[\"_emscripten_run_in_main_runtime_thread_js\"]=function(){return(_emscripten_run_in_main_runtime_thread_js=Module[\"_emscripten_run_in_main_runtime_thread_js\"]=Module[\"asm\"][\"qb\"]).apply(null,arguments)};var __emscripten_call_on_thread=Module[\"__emscripten_call_on_thread\"]=function(){return(__emscripten_call_on_thread=Module[\"__emscripten_call_on_thread\"]=Module[\"asm\"][\"rb\"]).apply(null,arguments)};var _emscripten_tls_init=Module[\"_emscripten_tls_init\"]=function(){return(_emscripten_tls_init=Module[\"_emscripten_tls_init\"]=Module[\"asm\"][\"sb\"]).apply(null,arguments)};var __emscripten_thread_init=Module[\"__emscripten_thread_init\"]=function(){return(__emscripten_thread_init=Module[\"__emscripten_thread_init\"]=Module[\"asm\"][\"tb\"]).apply(null,arguments)};var stackSave=Module[\"stackSave\"]=function(){return(stackSave=Module[\"stackSave\"]=Module[\"asm\"][\"ub\"]).apply(null,arguments)};var stackRestore=Module[\"stackRestore\"]=function(){return(stackRestore=Module[\"stackRestore\"]=Module[\"asm\"][\"vb\"]).apply(null,arguments)};var stackAlloc=Module[\"stackAlloc\"]=function(){return(stackAlloc=Module[\"stackAlloc\"]=Module[\"asm\"][\"wb\"]).apply(null,arguments)};var _emscripten_stack_set_limits=Module[\"_emscripten_stack_set_limits\"]=function(){return(_emscripten_stack_set_limits=Module[\"_emscripten_stack_set_limits\"]=Module[\"asm\"][\"xb\"]).apply(null,arguments)};var 
_memalign=Module[\"_memalign\"]=function(){return(_memalign=Module[\"_memalign\"]=Module[\"asm\"][\"yb\"]).apply(null,arguments)};var __emscripten_allow_main_runtime_queued_calls=Module[\"__emscripten_allow_main_runtime_queued_calls\"]=10016;var __emscripten_main_thread_futex=Module[\"__emscripten_main_thread_futex\"]=11652;Module[\"cwrap\"]=cwrap;Module[\"PThread\"]=PThread;Module[\"PThread\"]=PThread;Module[\"wasmMemory\"]=wasmMemory;Module[\"ExitStatus\"]=ExitStatus;var calledRun;function ExitStatus(status){this.name=\"ExitStatus\";this.message=\"Program terminated with exit(\"+status+\")\";this.status=status}dependenciesFulfilled=function runCaller(){if(!calledRun)run();if(!calledRun)dependenciesFulfilled=runCaller};function run(args){args=args||arguments_;if(runDependencies>0){return}if(ENVIRONMENT_IS_PTHREAD){readyPromiseResolve(Module);initRuntime();postMessage({\"cmd\":\"loaded\"});return}preRun();if(runDependencies>0){return}function doRun(){if(calledRun)return;calledRun=true;Module[\"calledRun\"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module[\"onRuntimeInitialized\"])Module[\"onRuntimeInitialized\"]();postRun()}if(Module[\"setStatus\"]){Module[\"setStatus\"](\"Running...\");setTimeout(function(){setTimeout(function(){Module[\"setStatus\"](\"\")},1);doRun()},1)}else{doRun()}}Module[\"run\"]=run;function exit(status,implicit){if(implicit&&noExitRuntime&&status===0){return}if(!implicit){if(ENVIRONMENT_IS_PTHREAD){postMessage({\"cmd\":\"exitProcess\",\"returnCode\":status});throw new ExitStatus(status)}else{}}if(noExitRuntime){}else{PThread.terminateAllThreads();EXITSTATUS=status;exitRuntime();if(Module[\"onExit\"])Module[\"onExit\"](status);ABORT=true}quit_(status,new ExitStatus(status))}if(Module[\"preInit\"]){if(typeof Module[\"preInit\"]==\"function\")Module[\"preInit\"]=[Module[\"preInit\"]];while(Module[\"preInit\"].length>0){Module[\"preInit\"].pop()()}}if(ENVIRONMENT_IS_PTHREAD){noExitRuntime=false;PThread.initWorker()}run();\n\n\n return WasmBackendModuleThreadedSimd.ready\n}\n);\n})();\nif (typeof exports === 'object' && typeof module === 'object')\n module.exports = WasmBackendModuleThreadedSimd;\nelse if (typeof define === 'function' && define['amd'])\n define([], function() { return WasmBackendModuleThreadedSimd; });\nelse if (typeof exports === 'object')\n exports[\"WasmBackendModuleThreadedSimd\"] = WasmBackendModuleThreadedSimd;\n", "\nvar WasmBackendModule = (function() {\n var _scriptDir = typeof document !== 'undefined' && document.currentScript ? 
document.currentScript.src : undefined;\n if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;\n return (\nfunction(WasmBackendModule) {\n WasmBackendModule = WasmBackendModule || {};\n\nvar Module=typeof WasmBackendModule!==\"undefined\"?WasmBackendModule:{};var readyPromiseResolve,readyPromiseReject;Module[\"ready\"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram=\"./this.program\";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window===\"object\";ENVIRONMENT_IS_WORKER=typeof importScripts===\"function\";ENVIRONMENT_IS_NODE=typeof process===\"object\"&&typeof process.versions===\"object\"&&typeof process.versions.node===\"string\";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var scriptDirectory=\"\";function locateFile(path){if(Module[\"locateFile\"]){return Module[\"locateFile\"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require(\"path\").dirname(scriptDirectory)+\"/\"}else{scriptDirectory=__dirname+\"/\"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process[\"argv\"].length>1){thisProgram=process[\"argv\"][1].replace(/\\\\/g,\"/\")}arguments_=process[\"argv\"].slice(2);process[\"on\"](\"uncaughtException\",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process[\"on\"](\"unhandledRejection\",abort);quit_=function(status){process[\"exit\"](status)};Module[\"inspect\"]=function(){return\"[Emscripten Module object]\"}}else if(ENVIRONMENT_IS_SHELL){if(typeof read!=\"undefined\"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer===\"function\"){return new Uint8Array(readbuffer(f))}data=read(f,\"binary\");assert(typeof data===\"object\");return data};if(typeof scriptArgs!=\"undefined\"){arguments_=scriptArgs}else if(typeof arguments!=\"undefined\"){arguments_=arguments}if(typeof quit===\"function\"){quit_=function(status){quit(status)}}if(typeof print!==\"undefined\"){if(typeof console===\"undefined\")console={};console.log=print;console.warn=console.error=typeof printErr!==\"undefined\"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof document!==\"undefined\"&&document.currentScript){scriptDirectory=document.currentScript.src}if(_scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf(\"blob:\")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf(\"/\")+1)}else{scriptDirectory=\"\"}{read_=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new 
XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.responseType=\"arraybuffer\";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,true);xhr.responseType=\"arraybuffer\";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}var out=Module[\"print\"]||console.log.bind(console);var err=Module[\"printErr\"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module[\"arguments\"])arguments_=Module[\"arguments\"];if(Module[\"thisProgram\"])thisProgram=Module[\"thisProgram\"];if(Module[\"quit\"])quit_=Module[\"quit\"];var wasmBinary;if(Module[\"wasmBinary\"])wasmBinary=Module[\"wasmBinary\"];var noExitRuntime=Module[\"noExitRuntime\"]||true;if(typeof WebAssembly!==\"object\"){abort(\"no native wasm support detected\")}var wasmMemory;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort(\"Assertion failed: \"+text)}}function getCFunc(ident){var func=Module[\"_\"+ident];assert(func,\"Cannot call unknown function \"+ident+\", make sure it is exported\");return func}function ccall(ident,returnType,argTypes,args,opts){var toC={\"string\":function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){var len=(str.length<<2)+1;ret=stackAlloc(len);stringToUTF8(str,ret,len)}return ret},\"array\":function(arr){var ret=stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}};function convertReturnValue(ret){if(returnType===\"string\")return UTF8ToString(ret);if(returnType===\"boolean\")return Boolean(ret);return ret}var func=getCFunc(ident);var cArgs=[];var stack=0;if(args){for(var i=0;i=endIdx))++endPtr;if(endPtr-idx>16&&heap.subarray&&UTF8Decoder){return UTF8Decoder.decode(heap.subarray(idx,endPtr))}else{var str=\"\";while(idx>10,56320|ch&1023)}}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(HEAPU8,ptr,maxBytesToRead):\"\"}function stringToUTF8Array(str,heap,outIdx,maxBytesToWrite){if(!(maxBytesToWrite>0))return 0;var startIdx=outIdx;var endIdx=outIdx+maxBytesToWrite-1;for(var i=0;i=55296&&u<=57343){var u1=str.charCodeAt(++i);u=65536+((u&1023)<<10)|u1&1023}if(u<=127){if(outIdx>=endIdx)break;heap[outIdx++]=u}else if(u<=2047){if(outIdx+1>=endIdx)break;heap[outIdx++]=192|u>>6;heap[outIdx++]=128|u&63}else if(u<=65535){if(outIdx+2>=endIdx)break;heap[outIdx++]=224|u>>12;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}else{if(outIdx+3>=endIdx)break;heap[outIdx++]=240|u>>18;heap[outIdx++]=128|u>>12&63;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}}heap[outIdx]=0;return outIdx-startIdx}function stringToUTF8(str,outPtr,maxBytesToWrite){return stringToUTF8Array(str,HEAPU8,outPtr,maxBytesToWrite)}function writeArrayToMemory(array,buffer){HEAP8.set(array,buffer)}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module[\"HEAP8\"]=HEAP8=new Int8Array(buf);Module[\"HEAP16\"]=HEAP16=new Int16Array(buf);Module[\"HEAP32\"]=HEAP32=new Int32Array(buf);Module[\"HEAPU8\"]=HEAPU8=new Uint8Array(buf);Module[\"HEAPU16\"]=HEAPU16=new Uint16Array(buf);Module[\"HEAPU32\"]=HEAPU32=new Uint32Array(buf);Module[\"HEAPF32\"]=HEAPF32=new 
Float32Array(buf);Module[\"HEAPF64\"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module[\"INITIAL_MEMORY\"]||16777216;var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;__ATINIT__.push({func:function(){___wasm_call_ctors()}});function preRun(){if(Module[\"preRun\"]){if(typeof Module[\"preRun\"]==\"function\")Module[\"preRun\"]=[Module[\"preRun\"]];while(Module[\"preRun\"].length){addOnPreRun(Module[\"preRun\"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function postRun(){if(Module[\"postRun\"]){if(typeof Module[\"postRun\"]==\"function\")Module[\"postRun\"]=[Module[\"postRun\"]];while(Module[\"postRun\"].length){addOnPostRun(Module[\"postRun\"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module[\"preloadedImages\"]={};Module[\"preloadedAudios\"]={};function abort(what){if(Module[\"onAbort\"]){Module[\"onAbort\"](what)}what+=\"\";err(what);ABORT=true;EXITSTATUS=1;what=\"abort(\"+what+\"). 
Build with -s ASSERTIONS=1 for more info.\";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix=\"data:application/octet-stream;base64,\";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix=\"file://\";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile=\"tfjs-backend-wasm.wasm\";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw\"both async and sync fetching of the wasm failed\"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch===\"function\"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){if(!response[\"ok\"]){throw\"failed to load wasm binary file at '\"+wasmBinaryFile+\"'\"}return response[\"arrayBuffer\"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={\"a\":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module[\"asm\"]=exports;wasmMemory=Module[\"asm\"][\"i\"];updateGlobalBufferAndViews(wasmMemory.buffer);wasmTable=Module[\"asm\"][\"o\"];removeRunDependency(\"wasm-instantiate\")}addRunDependency(\"wasm-instantiate\");function receiveInstantiatedSource(output){receiveInstance(output[\"instance\"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){return WebAssembly.instantiate(binary,info)}).then(receiver,function(reason){err(\"failed to asynchronously prepare wasm: \"+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming===\"function\"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch===\"function\"){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return result.then(receiveInstantiatedSource,function(reason){err(\"wasm streaming compile failed: \"+reason);err(\"falling back to ArrayBuffer instantiation\");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module[\"instantiateWasm\"]){try{var exports=Module[\"instantiateWasm\"](info,receiveInstance);return exports}catch(e){err(\"Module.instantiateWasm callback failed with error: \"+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback==\"function\"){callback(Module);continue}var func=callback.func;if(typeof func===\"number\"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}function _abort(){abort()}function _emscripten_memcpy_big(dest,src,num){HEAPU8.copyWithin(dest,src,src+num)}function _emscripten_get_heap_size(){return HEAPU8.length}function 
emscripten_realloc_buffer(size){try{wasmMemory.grow(size-buffer.byteLength+65535>>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=_emscripten_get_heap_size();var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}var SYSCALLS={mappings:{},buffers:[null,[],[]],printChar:function(stream,curr){var buffer=SYSCALLS.buffers[stream];if(curr===0||curr===10){(stream===1?out:err)(UTF8ArrayToString(buffer,0));buffer.length=0}else{buffer.push(curr)}},varargs:undefined,get:function(){SYSCALLS.varargs+=4;var ret=HEAP32[SYSCALLS.varargs-4>>2];return ret},getStr:function(ptr){var ret=UTF8ToString(ptr);return ret},get64:function(low,high){return low}};function _fd_close(fd){return 0}function _fd_seek(fd,offset_low,offset_high,whence,newOffset){}function _fd_write(fd,iov,iovcnt,pnum){var num=0;for(var i=0;i>2];var len=HEAP32[iov+(i*8+4)>>2];for(var j=0;j>2]=num;return 0}function _pthread_create(){return 6}function setErrNo(value){HEAP32[___errno_location()>>2]=value;return value}function _sysconf(name){switch(name){case 30:return 16384;case 85:var maxHeapSize=2147483648;return maxHeapSize/16384;case 132:case 133:case 12:case 137:case 138:case 15:case 235:case 16:case 17:case 18:case 19:case 20:case 149:case 13:case 10:case 236:case 153:case 9:case 21:case 22:case 159:case 154:case 14:case 77:case 78:case 139:case 82:case 68:case 67:case 164:case 11:case 29:case 47:case 48:case 95:case 52:case 51:case 46:return 200809;case 27:case 246:case 127:case 128:case 23:case 24:case 160:case 161:case 181:case 182:case 242:case 183:case 184:case 243:case 244:case 245:case 165:case 178:case 179:case 49:case 50:case 168:case 169:case 175:case 170:case 171:case 172:case 97:case 76:case 32:case 173:case 35:case 80:case 81:case 79:return-1;case 176:case 177:case 7:case 155:case 8:case 157:case 125:case 126:case 92:case 93:case 129:case 130:case 131:case 94:case 91:return 1;case 74:case 60:case 69:case 70:case 4:return 1024;case 31:case 42:case 72:return 32;case 87:case 26:case 33:return 2147483647;case 34:case 1:return 47839;case 38:case 36:return 99;case 43:case 37:return 2048;case 0:return 2097152;case 3:return 65536;case 28:return 32768;case 44:return 32767;case 75:return 16384;case 39:return 1e3;case 89:return 700;case 71:return 256;case 40:return 255;case 2:return 100;case 180:return 64;case 25:return 20;case 5:return 16;case 6:return 6;case 73:return 4;case 84:{if(typeof navigator===\"object\")return navigator[\"hardwareConcurrency\"]||1;return 1}}setErrNo(28);return-1}var asmLibraryArg={\"a\":_abort,\"d\":_emscripten_memcpy_big,\"e\":_emscripten_resize_heap,\"f\":_fd_close,\"c\":_fd_seek,\"b\":_fd_write,\"g\":_pthread_create,\"h\":_sysconf};var asm=createWasm();var ___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=function(){return(___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=Module[\"asm\"][\"j\"]).apply(null,arguments)};var _init=Module[\"_init\"]=function(){return(_init=Module[\"_init\"]=Module[\"asm\"][\"k\"]).apply(null,arguments)};var 
_register_tensor=Module[\"_register_tensor\"]=function(){return(_register_tensor=Module[\"_register_tensor\"]=Module[\"asm\"][\"l\"]).apply(null,arguments)};var _dispose_data=Module[\"_dispose_data\"]=function(){return(_dispose_data=Module[\"_dispose_data\"]=Module[\"asm\"][\"m\"]).apply(null,arguments)};var _dispose=Module[\"_dispose\"]=function(){return(_dispose=Module[\"_dispose\"]=Module[\"asm\"][\"n\"]).apply(null,arguments)};var _Abs=Module[\"_Abs\"]=function(){return(_Abs=Module[\"_Abs\"]=Module[\"asm\"][\"p\"]).apply(null,arguments)};var _Add=Module[\"_Add\"]=function(){return(_Add=Module[\"_Add\"]=Module[\"asm\"][\"q\"]).apply(null,arguments)};var _AddN=Module[\"_AddN\"]=function(){return(_AddN=Module[\"_AddN\"]=Module[\"asm\"][\"r\"]).apply(null,arguments)};var _All=Module[\"_All\"]=function(){return(_All=Module[\"_All\"]=Module[\"asm\"][\"s\"]).apply(null,arguments)};var _Any=Module[\"_Any\"]=function(){return(_Any=Module[\"_Any\"]=Module[\"asm\"][\"t\"]).apply(null,arguments)};var _ArgMax=Module[\"_ArgMax\"]=function(){return(_ArgMax=Module[\"_ArgMax\"]=Module[\"asm\"][\"u\"]).apply(null,arguments)};var _AvgPool=Module[\"_AvgPool\"]=function(){return(_AvgPool=Module[\"_AvgPool\"]=Module[\"asm\"][\"v\"]).apply(null,arguments)};var _BatchMatMul=Module[\"_BatchMatMul\"]=function(){return(_BatchMatMul=Module[\"_BatchMatMul\"]=Module[\"asm\"][\"w\"]).apply(null,arguments)};var _Ceil=Module[\"_Ceil\"]=function(){return(_Ceil=Module[\"_Ceil\"]=Module[\"asm\"][\"x\"]).apply(null,arguments)};var _ClipByValue=Module[\"_ClipByValue\"]=function(){return(_ClipByValue=Module[\"_ClipByValue\"]=Module[\"asm\"][\"y\"]).apply(null,arguments)};var _Conv2D=Module[\"_Conv2D\"]=function(){return(_Conv2D=Module[\"_Conv2D\"]=Module[\"asm\"][\"z\"]).apply(null,arguments)};var _Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=function(){return(_Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=Module[\"asm\"][\"A\"]).apply(null,arguments)};var _Cos=Module[\"_Cos\"]=function(){return(_Cos=Module[\"_Cos\"]=Module[\"asm\"][\"B\"]).apply(null,arguments)};var _Cosh=Module[\"_Cosh\"]=function(){return(_Cosh=Module[\"_Cosh\"]=Module[\"asm\"][\"C\"]).apply(null,arguments)};var _CropAndResize=Module[\"_CropAndResize\"]=function(){return(_CropAndResize=Module[\"_CropAndResize\"]=Module[\"asm\"][\"D\"]).apply(null,arguments)};var _Cumsum=Module[\"_Cumsum\"]=function(){return(_Cumsum=Module[\"_Cumsum\"]=Module[\"asm\"][\"E\"]).apply(null,arguments)};var _DepthToSpace=Module[\"_DepthToSpace\"]=function(){return(_DepthToSpace=Module[\"_DepthToSpace\"]=Module[\"asm\"][\"F\"]).apply(null,arguments)};var _DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=function(){return(_DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=Module[\"asm\"][\"G\"]).apply(null,arguments)};var _Elu=Module[\"_Elu\"]=function(){return(_Elu=Module[\"_Elu\"]=Module[\"asm\"][\"H\"]).apply(null,arguments)};var _Equal=Module[\"_Equal\"]=function(){return(_Equal=Module[\"_Equal\"]=Module[\"asm\"][\"I\"]).apply(null,arguments)};var _Exp=Module[\"_Exp\"]=function(){return(_Exp=Module[\"_Exp\"]=Module[\"asm\"][\"J\"]).apply(null,arguments)};var _FlipLeftRight=Module[\"_FlipLeftRight\"]=function(){return(_FlipLeftRight=Module[\"_FlipLeftRight\"]=Module[\"asm\"][\"K\"]).apply(null,arguments)};var _Floor=Module[\"_Floor\"]=function(){return(_Floor=Module[\"_Floor\"]=Module[\"asm\"][\"L\"]).apply(null,arguments)};var 
_FloorDiv=Module[\"_FloorDiv\"]=function(){return(_FloorDiv=Module[\"_FloorDiv\"]=Module[\"asm\"][\"M\"]).apply(null,arguments)};var _FusedBatchNorm=Module[\"_FusedBatchNorm\"]=function(){return(_FusedBatchNorm=Module[\"_FusedBatchNorm\"]=Module[\"asm\"][\"N\"]).apply(null,arguments)};var _FusedConv2D=Module[\"_FusedConv2D\"]=function(){return(_FusedConv2D=Module[\"_FusedConv2D\"]=Module[\"asm\"][\"O\"]).apply(null,arguments)};var _FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=function(){return(_FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=Module[\"asm\"][\"P\"]).apply(null,arguments)};var _Gather=Module[\"_Gather\"]=function(){return(_Gather=Module[\"_Gather\"]=Module[\"asm\"][\"Q\"]).apply(null,arguments)};var _GatherNd=Module[\"_GatherNd\"]=function(){return(_GatherNd=Module[\"_GatherNd\"]=Module[\"asm\"][\"R\"]).apply(null,arguments)};var _Greater=Module[\"_Greater\"]=function(){return(_Greater=Module[\"_Greater\"]=Module[\"asm\"][\"S\"]).apply(null,arguments)};var _GreaterEqual=Module[\"_GreaterEqual\"]=function(){return(_GreaterEqual=Module[\"_GreaterEqual\"]=Module[\"asm\"][\"T\"]).apply(null,arguments)};var _LeakyRelu=Module[\"_LeakyRelu\"]=function(){return(_LeakyRelu=Module[\"_LeakyRelu\"]=Module[\"asm\"][\"U\"]).apply(null,arguments)};var _Less=Module[\"_Less\"]=function(){return(_Less=Module[\"_Less\"]=Module[\"asm\"][\"V\"]).apply(null,arguments)};var _LessEqual=Module[\"_LessEqual\"]=function(){return(_LessEqual=Module[\"_LessEqual\"]=Module[\"asm\"][\"W\"]).apply(null,arguments)};var _Log=Module[\"_Log\"]=function(){return(_Log=Module[\"_Log\"]=Module[\"asm\"][\"X\"]).apply(null,arguments)};var _LogicalAnd=Module[\"_LogicalAnd\"]=function(){return(_LogicalAnd=Module[\"_LogicalAnd\"]=Module[\"asm\"][\"Y\"]).apply(null,arguments)};var _Max=Module[\"_Max\"]=function(){return(_Max=Module[\"_Max\"]=Module[\"asm\"][\"Z\"]).apply(null,arguments)};var _MaxPool=Module[\"_MaxPool\"]=function(){return(_MaxPool=Module[\"_MaxPool\"]=Module[\"asm\"][\"_\"]).apply(null,arguments)};var _Maximum=Module[\"_Maximum\"]=function(){return(_Maximum=Module[\"_Maximum\"]=Module[\"asm\"][\"$\"]).apply(null,arguments)};var _Mean=Module[\"_Mean\"]=function(){return(_Mean=Module[\"_Mean\"]=Module[\"asm\"][\"aa\"]).apply(null,arguments)};var _Min=Module[\"_Min\"]=function(){return(_Min=Module[\"_Min\"]=Module[\"asm\"][\"ba\"]).apply(null,arguments)};var _Minimum=Module[\"_Minimum\"]=function(){return(_Minimum=Module[\"_Minimum\"]=Module[\"asm\"][\"ca\"]).apply(null,arguments)};var _MirrorPad=Module[\"_MirrorPad\"]=function(){return(_MirrorPad=Module[\"_MirrorPad\"]=Module[\"asm\"][\"da\"]).apply(null,arguments)};var _Multiply=Module[\"_Multiply\"]=function(){return(_Multiply=Module[\"_Multiply\"]=Module[\"asm\"][\"ea\"]).apply(null,arguments)};var _Neg=Module[\"_Neg\"]=function(){return(_Neg=Module[\"_Neg\"]=Module[\"asm\"][\"fa\"]).apply(null,arguments)};var _NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=function(){return(_NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=Module[\"asm\"][\"ga\"]).apply(null,arguments)};var _NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=function(){return(_NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=Module[\"asm\"][\"ha\"]).apply(null,arguments)};var _NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=function(){return(_NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=Module[\"asm\"][\"ia\"]).apply(null,arguments)};var 
_NotEqual=Module[\"_NotEqual\"]=function(){return(_NotEqual=Module[\"_NotEqual\"]=Module[\"asm\"][\"ja\"]).apply(null,arguments)};var _OneHot=Module[\"_OneHot\"]=function(){return(_OneHot=Module[\"_OneHot\"]=Module[\"asm\"][\"ka\"]).apply(null,arguments)};var _PadV2=Module[\"_PadV2\"]=function(){return(_PadV2=Module[\"_PadV2\"]=Module[\"asm\"][\"la\"]).apply(null,arguments)};var _Pow=Module[\"_Pow\"]=function(){return(_Pow=Module[\"_Pow\"]=Module[\"asm\"][\"ma\"]).apply(null,arguments)};var _Prelu=Module[\"_Prelu\"]=function(){return(_Prelu=Module[\"_Prelu\"]=Module[\"asm\"][\"na\"]).apply(null,arguments)};var _Prod=Module[\"_Prod\"]=function(){return(_Prod=Module[\"_Prod\"]=Module[\"asm\"][\"oa\"]).apply(null,arguments)};var _RealDiv=Module[\"_RealDiv\"]=function(){return(_RealDiv=Module[\"_RealDiv\"]=Module[\"asm\"][\"pa\"]).apply(null,arguments)};var _Relu=Module[\"_Relu\"]=function(){return(_Relu=Module[\"_Relu\"]=Module[\"asm\"][\"qa\"]).apply(null,arguments)};var _Relu6=Module[\"_Relu6\"]=function(){return(_Relu6=Module[\"_Relu6\"]=Module[\"asm\"][\"ra\"]).apply(null,arguments)};var _ResizeBilinear=Module[\"_ResizeBilinear\"]=function(){return(_ResizeBilinear=Module[\"_ResizeBilinear\"]=Module[\"asm\"][\"sa\"]).apply(null,arguments)};var _Reverse=Module[\"_Reverse\"]=function(){return(_Reverse=Module[\"_Reverse\"]=Module[\"asm\"][\"ta\"]).apply(null,arguments)};var _RotateWithOffset=Module[\"_RotateWithOffset\"]=function(){return(_RotateWithOffset=Module[\"_RotateWithOffset\"]=Module[\"asm\"][\"ua\"]).apply(null,arguments)};var _Round=Module[\"_Round\"]=function(){return(_Round=Module[\"_Round\"]=Module[\"asm\"][\"va\"]).apply(null,arguments)};var _Rsqrt=Module[\"_Rsqrt\"]=function(){return(_Rsqrt=Module[\"_Rsqrt\"]=Module[\"asm\"][\"wa\"]).apply(null,arguments)};var _ScatterNd=Module[\"_ScatterNd\"]=function(){return(_ScatterNd=Module[\"_ScatterNd\"]=Module[\"asm\"][\"xa\"]).apply(null,arguments)};var _SelectV2=Module[\"_SelectV2\"]=function(){return(_SelectV2=Module[\"_SelectV2\"]=Module[\"asm\"][\"ya\"]).apply(null,arguments)};var _Sigmoid=Module[\"_Sigmoid\"]=function(){return(_Sigmoid=Module[\"_Sigmoid\"]=Module[\"asm\"][\"za\"]).apply(null,arguments)};var _Sin=Module[\"_Sin\"]=function(){return(_Sin=Module[\"_Sin\"]=Module[\"asm\"][\"Aa\"]).apply(null,arguments)};var _Softmax=Module[\"_Softmax\"]=function(){return(_Softmax=Module[\"_Softmax\"]=Module[\"asm\"][\"Ba\"]).apply(null,arguments)};var _Sqrt=Module[\"_Sqrt\"]=function(){return(_Sqrt=Module[\"_Sqrt\"]=Module[\"asm\"][\"Ca\"]).apply(null,arguments)};var _Square=Module[\"_Square\"]=function(){return(_Square=Module[\"_Square\"]=Module[\"asm\"][\"Da\"]).apply(null,arguments)};var _SquaredDifference=Module[\"_SquaredDifference\"]=function(){return(_SquaredDifference=Module[\"_SquaredDifference\"]=Module[\"asm\"][\"Ea\"]).apply(null,arguments)};var _Step=Module[\"_Step\"]=function(){return(_Step=Module[\"_Step\"]=Module[\"asm\"][\"Fa\"]).apply(null,arguments)};var _StridedSlice=Module[\"_StridedSlice\"]=function(){return(_StridedSlice=Module[\"_StridedSlice\"]=Module[\"asm\"][\"Ga\"]).apply(null,arguments)};var _Sub=Module[\"_Sub\"]=function(){return(_Sub=Module[\"_Sub\"]=Module[\"asm\"][\"Ha\"]).apply(null,arguments)};var _Sum=Module[\"_Sum\"]=function(){return(_Sum=Module[\"_Sum\"]=Module[\"asm\"][\"Ia\"]).apply(null,arguments)};var _Tan=Module[\"_Tan\"]=function(){return(_Tan=Module[\"_Tan\"]=Module[\"asm\"][\"Ja\"]).apply(null,arguments)};var 
_Tanh=Module[\"_Tanh\"]=function(){return(_Tanh=Module[\"_Tanh\"]=Module[\"asm\"][\"Ka\"]).apply(null,arguments)};var _Tile=Module[\"_Tile\"]=function(){return(_Tile=Module[\"_Tile\"]=Module[\"asm\"][\"La\"]).apply(null,arguments)};var _TopK=Module[\"_TopK\"]=function(){return(_TopK=Module[\"_TopK\"]=Module[\"asm\"][\"Ma\"]).apply(null,arguments)};var _Transform=Module[\"_Transform\"]=function(){return(_Transform=Module[\"_Transform\"]=Module[\"asm\"][\"Na\"]).apply(null,arguments)};var _Transpose=Module[\"_Transpose\"]=function(){return(_Transpose=Module[\"_Transpose\"]=Module[\"asm\"][\"Oa\"]).apply(null,arguments)};var __FusedMatMul=Module[\"__FusedMatMul\"]=function(){return(__FusedMatMul=Module[\"__FusedMatMul\"]=Module[\"asm\"][\"Pa\"]).apply(null,arguments)};var _malloc=Module[\"_malloc\"]=function(){return(_malloc=Module[\"_malloc\"]=Module[\"asm\"][\"Qa\"]).apply(null,arguments)};var _free=Module[\"_free\"]=function(){return(_free=Module[\"_free\"]=Module[\"asm\"][\"Ra\"]).apply(null,arguments)};var ___errno_location=Module[\"___errno_location\"]=function(){return(___errno_location=Module[\"___errno_location\"]=Module[\"asm\"][\"Sa\"]).apply(null,arguments)};var stackSave=Module[\"stackSave\"]=function(){return(stackSave=Module[\"stackSave\"]=Module[\"asm\"][\"Ta\"]).apply(null,arguments)};var stackRestore=Module[\"stackRestore\"]=function(){return(stackRestore=Module[\"stackRestore\"]=Module[\"asm\"][\"Ua\"]).apply(null,arguments)};var stackAlloc=Module[\"stackAlloc\"]=function(){return(stackAlloc=Module[\"stackAlloc\"]=Module[\"asm\"][\"Va\"]).apply(null,arguments)};Module[\"cwrap\"]=cwrap;var calledRun;function ExitStatus(status){this.name=\"ExitStatus\";this.message=\"Program terminated with exit(\"+status+\")\";this.status=status}dependenciesFulfilled=function runCaller(){if(!calledRun)run();if(!calledRun)dependenciesFulfilled=runCaller};function run(args){args=args||arguments_;if(runDependencies>0){return}preRun();if(runDependencies>0){return}function doRun(){if(calledRun)return;calledRun=true;Module[\"calledRun\"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module[\"onRuntimeInitialized\"])Module[\"onRuntimeInitialized\"]();postRun()}if(Module[\"setStatus\"]){Module[\"setStatus\"](\"Running...\");setTimeout(function(){setTimeout(function(){Module[\"setStatus\"](\"\")},1);doRun()},1)}else{doRun()}}Module[\"run\"]=run;if(Module[\"preInit\"]){if(typeof Module[\"preInit\"]==\"function\")Module[\"preInit\"]=[Module[\"preInit\"]];while(Module[\"preInit\"].length>0){Module[\"preInit\"].pop()()}}run();\n\n\n return WasmBackendModule.ready\n}\n);\n})();\nif (typeof exports === 'object' && typeof module === 'object')\n module.exports = WasmBackendModule;\nelse if (typeof define === 'function' && define['amd'])\n define([], function() { return WasmBackendModule; });\nelse if (typeof exports === 'object')\n exports[\"WasmBackendModule\"] = WasmBackendModule;\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Backend, DataId} from '../tensor';\nimport {BackendValues, DataType} from '../types';\n\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n\n// Required information for all backends.\nexport interface BackendTimingInfo {\n kernelMs: number|{error: string};\n getExtraProfileInfo?(): string; // a field for additional timing information\n // e.g. packing / unpacking for WebGL backend\n}\n\nexport interface TensorStorage {\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n disposeData(dataId: DataId, force?: boolean): boolean;\n write(values: BackendValues, shape: number[], dtype: DataType): DataId;\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void;\n memory(): {unreliable: boolean;}; // Backend-specific information.\n /** Returns number of data ids currently in the storage. */\n numDataIds(): number;\n refCount(dataId: DataId): number;\n}\n\n/** Convenient class for storing tensor-related data. */\nexport class DataStorage {\n private data = new WeakMap();\n private dataIdsCount = 0;\n\n constructor(private backend: KernelBackend, private dataMover: DataMover) {}\n\n get(dataId: DataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n\n set(dataId: DataId, value: T): void {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n\n has(dataId: DataId): boolean {\n return this.data.has(dataId);\n }\n\n delete(dataId: DataId): boolean {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n\n numDataIds(): number {\n return this.dataIdsCount;\n }\n}\n\nexport interface DataMover {\n /**\n * To be called by backends whenever they see a dataId that they don't own.\n * Upon calling this method, the mover will fetch the tensor from another\n * backend and register it with the current active backend.\n */\n moveData(backend: KernelBackend, dataId: DataId): void;\n}\n\nexport interface BackendTimer {\n // check if backend timer is available\n timerAvailable(): boolean;\n time(f: () => void): Promise;\n}\n\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. 
New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend implements TensorStorage, Backend, BackendTimer {\n refCount(dataId: DataId): number {\n return notYetImplemented('refCount');\n }\n incRef(dataId: DataId): void {\n return notYetImplemented('incRef');\n }\n timerAvailable(): boolean {\n return true;\n }\n time(f: () => void): Promise {\n return notYetImplemented('time');\n }\n read(dataId: object): Promise {\n return notYetImplemented('read');\n }\n readSync(dataId: object): BackendValues {\n return notYetImplemented('readSync');\n }\n numDataIds(): number {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId: object, force?: boolean): boolean {\n return notYetImplemented('disposeData');\n }\n write(values: BackendValues, shape: number[], dtype: DataType): DataId {\n return notYetImplemented('write');\n }\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void {\n return notYetImplemented('move');\n }\n memory(): {unreliable: boolean; reasons?: string[]} {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision(): 16|32 {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon(): number {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n dispose(): void {\n return notYetImplemented('dispose');\n }\n}\n\nfunction notYetImplemented(kernelName: string): never {\n throw new Error(\n `'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, DataTypeMap, FlatVector, NumericDataType, RecursiveArray, TensorLike, TypedArray} from './types';\n\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array: any[]|Uint32Array|Int32Array|\n Float32Array): void {\n let counter = array.length;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n swap(array, counter, index);\n }\n}\n\n/**\n * Shuffles two arrays in-place the same way using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1,2,3,4,5];\n * const b = [11,22,33,44,55];\n * tf.util.shuffleCombo(a, b);\n * console.log(a, b);\n * ```\n *\n * @param array The first array to shuffle in-place.\n * @param array2 The second array to shuffle in-place with the same permutation\n * as the first array.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function shuffleCombo(\n // tslint:disable-next-line:no-any\n array: any[]|Uint32Array|Int32Array|Float32Array,\n // tslint:disable-next-line:no-any\n array2: any[]|Uint32Array|Int32Array|Float32Array): void {\n if (array.length !== array2.length) {\n throw new Error(\n `Array sizes must match to be shuffled together ` +\n `First array length was ${array.length}` +\n `Second array length was ${array2.length}`);\n }\n let counter = array.length;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element of each array with it\n swap(array, counter, index);\n swap(array2, counter, index);\n }\n}\n\n/** Clamps a value to a specified range. */\nexport function clamp(min: number, x: number, max: number): number {\n return Math.max(min, Math.min(x, max));\n}\n\nexport function nearestLargerEven(val: number): number {\n return val % 2 === 0 ? 
val : val + 1;\n}\n\nexport function swap(\n object: {[index: number]: T}, left: number, right: number) {\n const temp = object[left];\n object[left] = object[right];\n object[right] = temp;\n}\n\nexport function sum(arr: number[]): number {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a: number, b: number) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a: FlatVector, b: FlatVector): number {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr: boolean, msg: () => string) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? msg : msg());\n }\n}\n\nexport function assertShapesMatch(\n shapeA: number[], shapeB: number[], errorMessagePrefix = ''): void {\n assert(\n arraysEqual(shapeA, shapeB),\n () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\n\nexport function assertNonNull(a: TensorLike): void {\n assert(\n a != null,\n () => `The input to the tensor constructor must be a non-null value.`);\n}\n\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. 
Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function\nflatten|TypedArray>(\n arr: T|RecursiveArray, result: T[] = [], skipTypedArray = false): T[] {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n } else {\n result.push(arr as T);\n }\n return result;\n}\n\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape: number[]): number {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\n\nexport function isScalarShape(shape: number[]): boolean {\n return shape.length === 0;\n}\n\nexport function arraysEqual(n1: FlatVector, n2: FlatVector) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\n\nexport function isInt(a: number): boolean {\n return a % 1 === 0;\n}\n\nexport function tanh(x: number): number {\n // tslint:disable-next-line:no-any\n if ((Math as any).tanh != null) {\n // tslint:disable-next-line:no-any\n return (Math as any).tanh(x);\n }\n if (x === Infinity) {\n return 1;\n } else if (x === -Infinity) {\n return -1;\n } else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\n\nexport function sizeToSquarishShape(size: number): [number, number] {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n: number): Uint32Array {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\n\nexport function rightPad(a: string, size: number): string {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\n\nexport function repeatedTry(\n checkFn: () => boolean, delayFn = (counter: number) => 0,\n maxCounter?: number): Promise {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n\n tryCount++;\n\n const nextBackoff = delayFn(tryCount);\n\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n\n tryFn();\n });\n}\n\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. 
For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(\n shape: number[], size: number): number[] {\n let shapeProd = 1;\n let implicitIdx = -1;\n\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n } else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(\n `Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n } else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n\n if (shapeProd === 0) {\n throw Error(\n `Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(\n `The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\n\nexport function parseAxisParam(\n axis: number|number[], shape: number[]): number[] {\n const rank = shape.length;\n\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n\n // Check for valid range\n assert(\n axis.every(ax => ax >= -rank && ax < rank),\n () =>\n `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n\n // Check for only integers\n assert(\n axis.every(ax => isInt(ax)),\n () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape: number[], axis?: number[]):\n {newShape: number[], keptDims: number[]} {\n const newShape: number[] = [];\n const keptDims: number[] = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(\n `Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return {newShape, keptDims};\n}\n\nexport function getTypedArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function getArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else if (dtype === 'string') {\n values = new Array<'string'>(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function checkConversionForErrors(\n vals: DataTypeMap[D]|number[], dtype: D): void {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype: DataType): boolean {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType: DataType, newType: DataType): boolean {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\n\nexport function isTypedArray(a: {}): a is Float32Array|Int32Array|Uint8Array {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\n\nexport function bytesPerElement(dtype: DataType): number {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n } else if (dtype === 'complex64') {\n return 8;\n } else if (dtype === 'bool') {\n return 1;\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. 
Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr: Uint8Array[]): number {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n\n/** Returns true if the value is a string. */\nexport function isString(value: {}): value is string {\n return typeof value === 'string' || value instanceof String;\n}\n\nexport function isBoolean(value: {}): boolean {\n return typeof value === 'boolean';\n}\n\nexport function isNumber(value: {}): boolean {\n return typeof value === 'number';\n}\n\nexport function inferDtype(values: TensorLike): DataType {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n } else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n } else if (isNumber(values)) {\n return 'float32';\n } else if (isString(values)) {\n return 'string';\n } else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\n\nexport function isFunction(f: Function) {\n return !!(f && f.constructor && f.call && f.apply);\n}\n\nexport function nearestDivisor(size: number, start: number): number {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\n\nexport function computeStrides(shape: number[]): number[] {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\n\nfunction createNestedArray(\n offset: number, shape: number[], a: TypedArray, isComplex = false) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0] * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n } else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a, isComplex);\n }\n }\n return ret;\n}\n\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(\n shape: number[], a: TypedArray, isComplex = false) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}${\n isComplex ? 
' for a complex tensor' : ''}.`);\n }\n\n return createNestedArray(0, shape, a, isComplex);\n}\n\nexport function makeOnesTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\n\nexport function makeZerosTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size) as DataTypeMap[D];\n } else if (dtype === 'int32') {\n return new Int32Array(size) as DataTypeMap[D];\n } else if (dtype === 'bool') {\n return new Uint8Array(size) as DataTypeMap[D];\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(\n shape: number[], dtype: D) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n } else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n } else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\nexport function assertNonNegativeIntegerDimensions(shape: number[]) {\n shape.forEach(dimSize => {\n assert(\n Number.isInteger(dimSize) && dimSize >= 0,\n () =>\n `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(\n locs: number[], rank: number, strides: number[]): number {\n if (rank === 0) {\n return 0;\n } else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(\n index: number, rank: number, strides: number[]): number[] {\n if (rank === 0) {\n return [];\n } else if (rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n\n/**\n * This method asserts whether an object is a Promise instance.\n * @param object\n */\n// tslint:disable-next-line: no-any\nexport function isPromise(object: any) {\n // We chose to not use 'obj instanceOf Promise' for two reasons:\n // 1. It only reliably works for es6 Promise, not other Promise\n // implementations.\n // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch\n // the async calls, so it is possible the obj (patched) is comparing to a\n // pre-patched Promise.\n return object && object.then && typeof object.then === 'function';\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from './environment';\n\nexport function warn(...msg: Array<{}>): void {\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.warn(...msg);\n }\n}\n\nexport function log(...msg: Array<{}>): void {\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.log(...msg);\n }\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Platform} from './platforms/platform';\nimport {isPromise} from './util_base';\nimport * as log from './log';\n\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n\ntype FlagValue = number|boolean;\ntype FlagEvaluationFn = (() => FlagValue)|(() => Promise);\nexport type Flags = {\n [featureName: string]: FlagValue\n};\nexport type FlagRegistryEntry = {\n evaluationFn: FlagEvaluationFn;\n setHook?: (value: FlagValue) => void;\n};\n\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n private flags: Flags = {};\n private flagRegistry: {[flagName: string]: FlagRegistryEntry} = {};\n\n private urlFlags: Flags = {};\n\n platformName: string;\n platform: Platform;\n\n // Jasmine spies on this in 'environment_test.ts'\n getQueryParams = getQueryParams;\n\n // tslint:disable-next-line: no-any\n constructor(public global: any) {\n this.populateURLFlags();\n }\n\n setPlatform(platformName: string, platform: Platform) {\n if (this.platform != null) {\n log.warn(\n `Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n\n registerFlag(\n flagName: string, evaluationFn: FlagEvaluationFn,\n setHook?: (value: FlagValue) => void) {\n this.flagRegistry[flagName] = {evaluationFn, setHook};\n\n // Override the flag value from the URL. 
This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n log.warn(\n `Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n\n async getAsync(flagName: string): Promise {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n\n get(flagName: string): FlagValue {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n const flagValue = this.evaluateFlag(flagName);\n if (isPromise(flagValue)) {\n throw new Error(\n `Flag ${flagName} cannot be synchronously evaluated. ` +\n `Please use getAsync() instead.`);\n }\n\n this.flags[flagName] = flagValue as number | boolean;\n\n return this.flags[flagName];\n }\n\n getNumber(flagName: string): number {\n return this.get(flagName) as number;\n }\n\n getBool(flagName: string): boolean {\n return this.get(flagName) as boolean;\n }\n\n getFlags(): Flags {\n return this.flags;\n }\n // For backwards compatibility.\n get features(): Flags {\n return this.flags;\n }\n\n set(flagName: string, value: FlagValue): void {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n\n private evaluateFlag(flagName: string): FlagValue|Promise {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n\n setFlags(flags: Flags) {\n this.flags = Object.assign({}, flags);\n }\n\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n\n private populateURLFlags(): void {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n\n const urlParams = this.getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':') as [string, string];\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\n\nexport function getQueryParams(queryString: string): {[key: string]: string} {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\n\nfunction decodeParam(\n params: {[key: string]: string}, name: string, value?: string) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\n\nfunction parseValue(flagName: string, value: string): FlagValue {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n } else if (`${+ value}` === value) {\n return +value;\n }\n throw new Error(\n `Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\n\nexport let ENV: Environment = 
null;\nexport function setEnvironmentGlobal(environment: Environment) {\n ENV = environment;\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace: {_tfGlobals: Map};\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace(): {_tfGlobals: Map} {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns: any;\n if (typeof (window) !== 'undefined') {\n ns = window;\n } else if (typeof (global) !== 'undefined') {\n ns = global;\n } else if (typeof (process) !== 'undefined') {\n ns = process;\n } else if (typeof (self) !== 'undefined') {\n ns = self;\n } else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n\n// tslint:disable-next-line:no-any\nfunction getGlobalMap(): Map {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key: string, init: () => T): T {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n } else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Allow UpperCamelCase variable names\n// tslint:disable: variable-name\n// Unfortunately just enabling PascalCase per file (tslint:enable:\n// allow-pascal-case) doesn't work.\nimport {NamedTensorInfoMap, TensorInfo} from './kernel_registry';\nimport {ExplicitPadding} from './ops/conv_util';\nimport {Activation} from './ops/fused_types';\nimport {DataType, PixelData} from './types';\n\nexport const Abs = 'Abs';\nexport type AbsInputs = UnaryInputs;\n\nexport const Acos = 'Acos';\nexport type AcosInputs = UnaryInputs;\n\nexport const Acosh = 'Acosh';\nexport type AcoshInputs = UnaryInputs;\n\nexport const Add = 'Add';\nexport type AddInputs = BinaryInputs;\n\nexport const AddN = 'AddN';\nexport type AddNInputs = TensorInfo[];\n\nexport const All = 'All';\nexport type AllInputs = Pick;\nexport interface AllAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Any = 'Any';\nexport type AnyInputs = Pick;\nexport interface AnyAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const ArgMax = 'ArgMax';\nexport type ArgMaxInputs = Pick;\nexport interface ArgMaxAttrs {\n axis: number;\n}\n\nexport const ArgMin = 'ArgMin';\nexport type ArgMinInputs = Pick;\nexport interface ArgMinAttrs {\n axis: number;\n}\n\nexport const Asin = 'Asin';\nexport type AsinInputs = UnaryInputs;\n\nexport const Asinh = 'Asinh';\nexport type AsinhInputs = UnaryInputs;\n\nexport const Atan = 'Atan';\nexport type AtanInputs = UnaryInputs;\n\nexport const Atanh = 'Atanh';\nexport type AtanhInputs = UnaryInputs;\n\nexport const Atan2 = 'Atan2';\nexport type Atan2Inputs = BinaryInputs;\n\nexport const AvgPool = 'AvgPool';\nexport type AvgPoolInputs = Pick;\nexport interface AvgPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const AvgPoolGrad = 'AvgPoolGrad';\nexport type AvgPoolGradInputs = Pick;\nexport interface AvgPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n}\n\nexport const AvgPool3D = 'AvgPool3D';\nexport type AvgPool3DInputs = Pick;\nexport interface AvgPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n dataFormat: 'NDHWC'|'NCDHW';\n}\n\nexport const AvgPool3DGrad = 'AvgPool3DGrad';\nexport type AvgPool3DGradInputs = Pick;\nexport interface AvgPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const BatchMatMul = 'BatchMatMul';\nexport type BatchMatMulInputs = Pick;\nexport interface BatchMatMulAttrs {\n transposeA: 
boolean;\n transposeB: boolean;\n}\n\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport type BatchToSpaceNDInputs = Pick;\nexport interface BatchToSpaceNDAttrs {\n blockShape: number[];\n crops: number[][];\n}\n\nexport type BinaryInputs = Pick;\n\nexport const Bincount = 'Bincount';\nexport type BincountInputs = Pick;\nexport interface BincountAttrs {\n size: number;\n}\n\nexport const BroadcastTo = 'BroadcastTo';\nexport type BroadcastToInputs = Pick;\nexport interface BroadCastToAttrs {\n shape: number[];\n inputShape: number[]; // for gradient\n}\n\nexport const BroadcastArgs = 'BroadcastArgs';\nexport type BroadcastArgsInputs = Pick;\n\nexport const Cast = 'Cast';\nexport type CastInputs = UnaryInputs;\nexport interface CastAttrs {\n dtype: DataType;\n}\n\nexport const Ceil = 'Ceil';\nexport type CeilInputs = UnaryInputs;\n\nexport const ClipByValue = 'ClipByValue';\nexport type ClipByValueInputs = UnaryInputs;\nexport interface ClipByValueAttrs {\n clipValueMin: number;\n clipValueMax: number;\n}\n\nexport const Complex = 'Complex';\nexport type ComplexInputs = Pick;\n\nexport const ComplexAbs = 'ComplexAbs';\nexport type ComplexAbsInputs = UnaryInputs;\n\nexport const Concat = 'Concat';\nexport type ConcatInputs = TensorInfo[];\nexport interface ConcatAttrs {\n axis: number;\n}\n\nexport const Conv2D = 'Conv2D';\nexport type Conv2DInputs = Pick;\nexport interface Conv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport type Conv2DBackpropFilterInputs = Pick;\nexport interface Conv2DBackpropFilterAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport type Conv2DBackpropInputInputs = Pick;\nexport interface Conv2DBackpropInputAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Conv3D = 'Conv3D';\nexport type Conv3DInputs = Pick;\nexport interface Conv3DAttrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n dataFormat: 'NDHWC'|'NCDHW';\n dilations: [number, number, number]|number;\n}\n\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport type Conv3DBackpropFilterV2Inputs = Pick;\n\nexport interface Conv3DBackpropFilterV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n filterShape: [number, number, number, number, number];\n}\n\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport type Conv3DBackpropInputV2Inputs =\n Pick;\nexport interface Conv3DBackpropInputV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n inputShape: [number, number, number, number, number];\n}\n\nexport const Cos = 'Cos';\nexport type CosInputs = UnaryInputs;\n\nexport const Cosh = 'Cosh';\nexport type CoshInputs = UnaryInputs;\n\nexport const Cumsum = 'Cumsum';\nexport type CumsumInputs = Pick;\nexport interface CumsumAttrs {\n axis: number;\n exclusive: boolean;\n reverse: boolean;\n}\n\nexport const CropAndResize = 'CropAndResize';\nexport type CropAndResizeInputs =\n 
Pick;\nexport interface CropAndResizeAttrs {\n cropSize: [number, number];\n method: 'bilinear'|'nearest';\n extrapolationValue: number;\n}\n\nexport const DenseBincount = 'DenseBincount';\nexport type DenseBincountInputs = Pick;\nexport interface DenseBincountAttrs {\n size: number;\n binaryOutput?: boolean;\n}\n\nexport const DepthToSpace = 'DepthToSpace';\nexport type DepthToSpaceInputs = Pick;\nexport interface DepthToSpaceAttrs {\n blockSize: number;\n dataFormat: 'NHWC'|'NCHW';\n}\n\nexport const DepthwiseConv2dNative = 'DepthwiseConv2dNative';\nexport type DepthwiseConv2dNativeInputs =\n Pick;\nexport interface DepthwiseConv2dNativeAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const DepthwiseConv2dNativeBackpropFilter =\n 'DepthwiseConv2dNativeBackpropFilter';\nexport type DepthwiseConv2dNativeBackpropFilterInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropFilterAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const DepthwiseConv2dNativeBackpropInput =\n 'DepthwiseConv2dNativeBackpropInput';\nexport type DepthwiseConv2dNativeBackpropInputInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropInputAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Diag = 'Diag';\nexport type DiagInputs = Pick;\n\nexport const Dilation2D = 'Dilation2D';\nexport type Dilation2DInputs = Pick;\nexport interface Dilation2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dilations: [number, number]|number;\n}\n\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport type Dilation2DBackpropInputInputs =\n Pick;\n\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport type Dilation2DBackpropFilterInputs =\n Pick;\n\nexport const RealDiv = 'RealDiv';\nexport type RealDivInputs = BinaryInputs;\n\nexport const Einsum = 'Einsum';\nexport type EinsumInputs = TensorInfo[];\nexport interface EinsumAttrs {\n equation: string;\n}\n\nexport const Elu = 'Elu';\nexport type EluInputs = Pick;\n\nexport const EluGrad = 'EluGrad';\nexport type EluGradInputs = Pick;\n\nexport const Erf = 'Erf';\nexport type ErfInputs = UnaryInputs;\n\nexport const Equal = 'Equal';\nexport type EqualInputs = BinaryInputs;\n\nexport const Exp = 'Exp';\nexport type ExpInputs = UnaryInputs;\n\nexport const ExpandDims = 'ExpandDims';\nexport type ExpandDimsInputs = Pick;\nexport interface ExpandDimsAttrs {\n dim: number;\n}\n\nexport const Expm1 = 'Expm1';\nexport type Expm1Inputs = UnaryInputs;\n\nexport const FFT = 'FFT';\nexport type FFTInputs = Pick;\n\nexport const Fill = 'Fill';\nexport interface FillAttrs {\n shape: number[];\n value: number|string;\n dtype: DataType;\n}\n\nexport const FlipLeftRight = 'FlipLeftRight';\nexport type FlipLeftRightInputs = Pick;\n\nexport const Floor = 'Floor';\nexport type FloorInputs = UnaryInputs;\n\nexport const FloorDiv = 'FloorDiv';\nexport type FloorDivInputs = BinaryInputs;\n\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport type FusedBatchNormInputs =\n Pick;\nexport 
interface FusedBatchNormAttrs {\n varianceEpsilon: number;\n}\n\nexport const GatherV2 = 'GatherV2';\nexport type GatherV2Inputs = Pick;\nexport interface GatherV2Attrs {\n axis: number;\n batchDims: number;\n}\n\nexport const GatherNd = 'GatherNd';\nexport type GatherNdInputs = Pick;\n\nexport const Greater = 'Greater';\nexport type GreaterInputs = BinaryInputs;\n\nexport const GreaterEqual = 'GreaterEqual';\nexport type GreaterEqualInputs = BinaryInputs;\n\nexport const Identity = 'Identity';\nexport type IdentityInputs = Pick;\n\nexport const IFFT = 'IFFT';\nexport type IFFTInputs = Pick;\n\nexport const Imag = 'Imag';\nexport type ImagInputs = Pick;\n\nexport const IsFinite = 'IsFinite';\nexport type IsFiniteInputs = UnaryInputs;\n\nexport const IsInf = 'IsInf';\nexport type IsInfInputs = UnaryInputs;\n\nexport const IsNan = 'IsNan';\nexport type IsNanInputs = UnaryInputs;\n\nexport const LeakyRelu = 'LeakyRelu';\nexport type LeakyReluInputs = Pick;\nexport interface LeakyReluAttrs {\n alpha: number;\n}\n\nexport const Less = 'Less';\nexport type LessInputs = BinaryInputs;\n\nexport const LessEqual = 'LessEqual';\nexport type LessEqualInputs = BinaryInputs;\n\nexport const LinSpace = 'LinSpace';\nexport interface LinSpaceAttrs {\n start: number;\n stop: number;\n num: number;\n}\nexport const Log = 'Log';\nexport type LogInputs = UnaryInputs;\n\nexport const Log1p = 'Log1p';\nexport type Log1pInputs = UnaryInputs;\n\nexport const LogicalAnd = 'LogicalAnd';\nexport type LogicalAndInputs = BinaryInputs;\n\nexport const LogicalNot = 'LogicalNot';\nexport type LogicalNotInputs = Pick;\n\nexport const LogicalOr = 'LogicalOr';\nexport type LogicalOrInputs = BinaryInputs;\n\nexport const LogSoftmax = 'LogSoftmax';\nexport type LogSoftmaxInputs = Pick;\nexport interface LogSoftmaxAttrs {\n axis: number;\n}\n\nexport const LRN = 'LRN';\nexport type LRNInputs = Pick;\nexport interface LRNAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const LRNGrad = 'LRNGrad';\nexport type LRNGradInputs = Pick;\nexport interface LRNGradAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const Max = 'Max';\nexport type MaxInputs = Pick;\nexport interface MaxAttrs {\n reductionIndices: number|number[];\n keepDims: boolean;\n}\n\nexport const Maximum = 'Maximum';\nexport type MaximumInputs = BinaryInputs;\n\nexport const MaxPool = 'MaxPool';\nexport type MaxPoolInputs = Pick;\nexport interface MaxPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolGrad = 'MaxPoolGrad';\nexport type MaxPoolGradInputs = Pick;\nexport interface MaxPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3D = 'MaxPool3D';\nexport type MaxPool3DInputs = Pick;\nexport interface MaxPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dataFormat: 'NDHWC'|'NCDHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3DGrad = 'MaxPool3DGrad';\nexport type MaxPool3DGradInputs =\n Pick;\nexport interface MaxPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 
'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport type MaxPoolWithArgmaxInputs = Pick;\nexport interface MaxPoolWithArgmaxAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n includeBatchInIndex: boolean;\n}\n\nexport const Mean = 'Mean';\nexport type MeanInputs = Pick;\nexport interface MeanAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Min = 'Min';\nexport type MinInputs = Pick;\nexport interface MinAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Minimum = 'Minimum';\nexport type MinimumInputs = BinaryInputs;\n\nexport const MirrorPad = 'MirrorPad';\nexport type MirrorPadInputs = Pick;\nexport interface MirrorPadAttrs {\n paddings: Array<[number, number]>;\n mode: 'reflect'|'symmetric';\n}\n\nexport const Mod = 'Mod';\nexport type ModInputs = BinaryInputs;\n\nexport const Multinomial = 'Multinomial';\nexport type MultinomialInputs = Pick;\nexport interface MultinomialAttrs {\n numSamples: number;\n seed: number;\n normalized: boolean;\n}\n\nexport const Multiply = 'Multiply';\nexport type MultiplyInputs = BinaryInputs;\n\nexport const Neg = 'Neg';\nexport type NegInputs = UnaryInputs;\n\nexport const NotEqual = 'NotEqual';\nexport type NotEqualInputs = BinaryInputs;\n\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport type NonMaxSuppressionV3Inputs =\n Pick;\nexport interface NonMaxSuppressionV3Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n}\n\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport type NonMaxSuppressionV4Inputs =\n Pick;\nexport interface NonMaxSuppressionV4Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n padToMaxOutputSize: boolean;\n}\n\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport type NonMaxSuppressionV5Inputs =\n Pick;\nexport interface NonMaxSuppressionV5Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n softNmsSigma: number;\n}\n\nexport const OnesLike = 'OnesLike';\nexport type OnesLikeInputs = UnaryInputs;\n\nexport const OneHot = 'OneHot';\nexport type OneHotInputs = Pick;\nexport interface OneHotAttrs {\n depth: number;\n onValue: number;\n offValue: number;\n}\n\nexport const Pack = 'Pack';\nexport type PackInputs = TensorInfo[];\nexport interface PackAttrs {\n axis: number;\n}\n\nexport const PadV2 = 'PadV2';\nexport type PadV2Inputs = Pick;\nexport interface PadV2Attrs {\n paddings: Array<[number, number]>;\n constantValue: number;\n}\n\nexport const Pool = 'Pool';\nexport type PoolInputs = Pick;\n\nexport const Pow = 'Pow';\nexport type PowInputs = BinaryInputs;\n\nexport const Prelu = 'Prelu';\nexport type PreluInputs = Pick;\n\nexport const Prod = 'Prod';\nexport type ProdInputs = Pick;\nexport interface ProdAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Range = 'Range';\nexport interface RangeAttrs {\n start: number;\n stop: number;\n step: number;\n dtype: 'float32'|'int32';\n}\n\nexport const Real = 'Real';\nexport type RealInputs = Pick;\n\nexport const Reciprocal = 'Reciprocal';\nexport type ReciprocalInputs = UnaryInputs;\n\nexport const Relu = 'Relu';\nexport type ReluInputs = Pick;\n\nexport const Reshape = 'Reshape';\nexport type ReshapeInputs = Pick;\nexport interface ReshapeAttrs {\n shape: number[];\n}\n\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport type ResizeNearestNeighborInputs = 
Pick;\nexport interface ResizeNearestNeighborAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport type ResizeNearestNeighborGradInputs =\n Pick;\nexport type ResizeNearestNeighborGradAttrs = ResizeNearestNeighborAttrs;\n\nexport const ResizeBilinear = 'ResizeBilinear';\nexport type ResizeBilinearInputs = Pick;\nexport interface ResizeBilinearAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport type ResizeBilinearGradInputs = Pick;\nexport type ResizeBilinearGradAttrs = ResizeBilinearAttrs;\n\nexport const Relu6 = 'Relu6';\nexport type Relu6Inputs = Pick;\n\nexport const Reverse = 'Reverse';\nexport type ReverseInputs = Pick;\nexport interface ReverseAttrs {\n dims: number|number[];\n}\n\nexport const Round = 'Round';\nexport type RoundInputs = UnaryInputs;\n\nexport const Rsqrt = 'Rsqrt';\nexport type RsqrtInputs = UnaryInputs;\n\nexport const ScatterNd = 'ScatterNd';\nexport type ScatterNdInputs = Pick;\nexport interface ScatterNdAttrs {\n shape: number[];\n}\n\nexport const Select = 'Select';\nexport type SelectInputs = Pick;\n\nexport const Selu = 'Selu';\nexport type SeluInputs = Pick;\n\nexport const Slice = 'Slice';\nexport type SliceInputs = Pick;\nexport interface SliceAttrs {\n begin: number|number[];\n size: number|number[];\n}\nexport const Sin = 'Sin';\nexport type SinInputs = UnaryInputs;\n\nexport const Sinh = 'Sinh';\nexport type SinhInputs = UnaryInputs;\n\nexport const Sign = 'Sign';\nexport type SignInputs = UnaryInputs;\n\nexport const Sigmoid = 'Sigmoid';\nexport type SigmoidInputs = UnaryInputs;\n\nexport const Softplus = 'Softplus';\nexport type SoftplusInputs = UnaryInputs;\n\nexport const Sqrt = 'Sqrt';\nexport type SqrtInputs = UnaryInputs;\n\nexport const Sum = 'Sum';\nexport type SumInputs = Pick;\nexport interface SumAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport type SpaceToBatchNDInputs = Pick;\nexport interface SpaceToBatchNDAttrs {\n blockShape: number[];\n paddings: number[][];\n}\n\nexport const SplitV = 'SplitV';\nexport type SplitVInputs = Pick;\nexport interface SplitVAttrs {\n numOrSizeSplits: number[]|number;\n axis: number;\n}\n\nexport const Softmax = 'Softmax';\nexport type SoftmaxInputs = Pick;\nexport interface SoftmaxAttrs {\n dim: number;\n}\n\nexport const SparseFillEmptyRows = 'SparseFillEmptyRows';\nexport type SparseFillEmptyRowsInputs =\n Pick;\n\nexport const SparseReshape = 'SparseReshape';\nexport type SparseReshapeInputs =\n Pick;\n\nexport const SparseSegmentMean = 'SparseSegmentMean';\nexport type SparseSegmentMeanInputs =\n Pick;\n\nexport const SparseSegmentSum = 'SparseSegmentSum';\nexport type SparseSegmentSumInputs =\n Pick;\n\nexport const SparseToDense = 'SparseToDense';\nexport type SparseToDenseInputs =\n Pick;\nexport interface SparseToDenseAttrs {\n outputShape: number[];\n}\n\nexport const SquaredDifference = 'SquaredDifference';\nexport type SquaredDifferenceInputs = BinaryInputs;\n\nexport const Square = 'Square';\nexport type SquareInputs = Pick;\n\nexport const StridedSlice = 'StridedSlice';\nexport type StridedSliceInputs = Pick;\nexport interface StridedSliceAttrs {\n begin: number[];\n end: number[];\n strides: number[];\n beginMask: number;\n endMask: number;\n ellipsisMask: number;\n newAxisMask: number;\n shrinkAxisMask: 
number;\n}\n\nexport const StringNGrams = 'StringNGrams';\nexport type StringNGramsInputs = Pick;\nexport interface StringNGramsAttrs {\n separator: string;\n nGramWidths: number[];\n leftPad: string;\n rightPad: string;\n padWidth: number;\n preserveShortSequences: boolean;\n}\n\nexport const StringSplit = 'StringSplit';\nexport type StringSplitInputs = Pick;\nexport interface StringSplitAttrs {\n skipEmpty: boolean;\n}\n\nexport const StringToHashBucketFast = 'StringToHashBucketFast';\nexport type StringToHashBucketFastInputs = Pick;\nexport interface StringToHashBucketFastAttrs {\n numBuckets: number;\n}\n\nexport const Sub = 'Sub';\nexport type SubInputs = BinaryInputs;\n\nexport const Tan = 'Tan';\nexport type TanInputs = UnaryInputs;\n\nexport const Tanh = 'Tanh';\nexport type TanhInputs = UnaryInputs;\n\nexport const Tile = 'Tile';\nexport type TileInputs = Pick;\nexport interface TileAttrs {\n reps: number[];\n}\n\nexport const TopK = 'TopK';\nexport type TopKInputs = Pick;\nexport interface TopKAttrs {\n k: number;\n sorted: boolean;\n}\n\nexport const Transform = 'Transform';\nexport type TransformInputs = Pick;\nexport interface TransformAttrs {\n interpolation: 'nearest'|'bilinear';\n fillMode: 'constant'|'reflect'|'wrap'|'nearest';\n fillValue: number;\n outputShape?: [number, number];\n}\n\nexport const Transpose = 'Transpose';\nexport type TransposeInputs = Pick;\nexport interface TransposeAttrs {\n perm: number[];\n}\n\nexport const Unique = 'Unique';\nexport type UniqueInputs = Pick;\nexport interface UniqueAttrs {\n axis: number;\n}\n\nexport type UnaryInputs = Pick;\n\nexport const Unpack = 'Unpack';\nexport type UnpackInputs = Pick;\nexport interface UnpackAttrs {\n axis: number;\n}\n\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport type UnsortedSegmentSumInputs =\n Pick;\nexport interface UnsortedSegmentSumAttrs {\n numSegments: number;\n}\n\nexport const ZerosLike = 'ZerosLike';\nexport type ZerosLikeInputs = UnaryInputs;\n\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport type StepInputs = UnaryInputs;\nexport interface StepAttrs {\n alpha: number;\n}\n\nexport const FromPixels = 'FromPixels';\nexport interface FromPixelsInputs {\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap;\n}\nexport interface FromPixelsAttrs {\n numChannels: number;\n}\n\nexport const RotateWithOffset = 'RotateWithOffset';\nexport type RotateWithOffsetInputs = Pick;\nexport interface RotateWithOffsetAttrs {\n radians: number;\n fillValue: number|[number, number, number];\n center: number|[number, number];\n}\n\nexport const _FusedMatMul = '_FusedMatMul';\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulInputs extends NamedTensorInfoMap {\n a: TensorInfo;\n b: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulAttrs {\n transposeA: boolean;\n transposeB: boolean;\n activation: Activation;\n leakyreluAlpha?: number;\n}\n\nexport const FusedConv2D = 'FusedConv2D';\nexport interface FusedConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n 
leakyreluAlpha?: number;\n}\n\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\nexport interface FusedDepthwiseConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedDepthwiseConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n leakyreluAlpha?: number;\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from './environment';\nimport {getGlobal} from './global_util';\nimport * as log from './log';\nimport {NamedGradientMap} from './tape';\nimport {Tensor} from './tensor';\nimport {DataType, RecursiveArray} from './types';\n\nconst kernelRegistry =\n getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry =\n getGlobal('gradRegistry', () => new Map());\n\nexport type DataId = object;\n\ntype AttributeValue =\n number|number[]|boolean|boolean[]|string|string[]|NamedAttrMap;\n\n/** These are extra non-tensor/primitive params passed to kernel functions. */\nexport type Attribute = AttributeValue|RecursiveArray;\n\n/** Specifies the code to run when executing a kernel. */\nexport type KernelFunc = (params: {\n inputs: NamedTensorInfoMap,\n backend: {},\n attrs?: NamedAttrMap,\n}) => TensorInfo|TensorInfo[];\n\n/** The function to run when computing a gradient during backprop. */\nexport type GradFunc =\n (dy: Tensor|Tensor[], saved: Tensor[], attrs: NamedAttrMap) =>\n NamedGradientMap;\n\n/** Function that gets called after the backend initializes. */\nexport type KernelSetupFunc = (backend: {}) => void;\n/** Function that gets called right before the backend is disposed. */\nexport type KernelDisposeFunc = KernelSetupFunc;\n\n/** Config object for registering a kernel in the global registry. */\nexport interface KernelConfig {\n kernelName: string;\n backendName: string;\n kernelFunc: KernelFunc;\n setupFunc?: KernelSetupFunc;\n disposeFunc?: KernelDisposeFunc;\n}\n\n/** Config object for registering a gradient in the global registry. */\nexport interface GradConfig {\n kernelName: string;\n inputsToSave?: string[];\n // When saveAllInputs is true, all inputs will be saved. Only use this flag\n // if inputs is an array of Tensors.\n saveAllInputs?: boolean;\n outputsToSave?: boolean[];\n gradFunc: GradFunc;\n}\n\n/** Holds metadata for a given tensor. 
*/\nexport interface TensorInfo {\n dataId: DataId;\n shape: number[];\n dtype: DataType;\n}\n\nexport interface NamedTensorInfoMap {\n [name: string]: TensorInfo|undefined;\n}\n\nexport interface NamedAttrMap {\n [name: string]: Attribute;\n}\n\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(\n kernelName: string, backendName: string): KernelConfig {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName: string): GradConfig {\n return gradRegistry.get(kernelName);\n}\n\nexport function getKernelsForBackend(backendName: string): KernelConfig[] {\n const it = kernelRegistry.entries();\n const result: KernelConfig[] = [];\n\n while (true) {\n const {done, value} = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend, ] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config: KernelConfig) {\n const {kernelName, backendName} = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n log.warn(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config: GradConfig) {\n const {kernelName} = config;\n\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n log.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(\n kernelName: string, backendName: string): void {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n\n/** Removes the registered gradient from the global registry. 
*/\nexport function unregisterGradient(kernelName: string): void {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(\n `The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(\n registeredBackendName: string, newBackendName: string): void {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig =\n Object.assign({}, kernelConfig, {backendName: newBackendName});\n registerKernel(newKernelConfig);\n });\n}\n\nfunction makeKey(kernelName: string, backendName: string) {\n return `${backendName}_${kernelName}`;\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from './environment';\nimport {BackendValues, DataType, TensorLike, TypedArray} from './types';\nimport * as base from './util_base';\nexport * from './util_base';\nexport * from './hash_util';\n\n/**\n * Create typed array for scalar value. Used for storing in `DataStorage`.\n */\nexport function createScalarValue(\n value: DataType, dtype: DataType): BackendValues {\n if (dtype === 'string') {\n return encodeString(value);\n }\n\n return toTypedArray([value], dtype);\n}\n\nfunction noConversionNeeded(a: TensorLike, dtype: DataType): boolean {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\n\nexport function toTypedArray(a: TensorLike, dtype: DataType): TypedArray {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = base.flatten(a);\n }\n\n if (env().getBool('DEBUG')) {\n base.checkConversionForErrors(a as number[], dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a as TypedArray;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a as number[]);\n } else if (dtype === 'int32') {\n return new Int32Array(a as number[]);\n } else if (dtype === 'bool') {\n const bool = new Uint8Array((a as number[]).length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round((a as number[])[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. 
It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now(): number {\n return env().platform.now();\n}\n\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(\n path: string, requestInits?: RequestInit): Promise {\n return env().platform.fetch(path, requestInits);\n}\n\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s: string, encoding = 'utf-8'): Uint8Array {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes: Uint8Array, encoding = 'utf-8'): string {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n", "/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Workaround for allowing cjs module to be included in bundle created by\n// rollup.\nimport * as LongExports from 'long';\n// tslint:disable-next-line\nconst Long: LongExports.LongConstructor =\n // tslint:disable-next-line\n (LongExports as any).default || LongExports;\n\nexport function hexToLong(hex: string): Long {\n return Long.fromString(hex, true, 16);\n}\n\n// Some primes between 2^63 and 2^64 for various uses.\n// Hex 0xc3a5c85c97cb3127\nconst k0: Long = hexToLong('c3a5c85c97cb3127');\n// Hex 0xb492b66fbe98f273\nconst k1: Long = hexToLong('b492b66fbe98f273');\n// Hex 0x9ae16a3b2f90404f\nconst k2: Long = hexToLong('9ae16a3b2f90404f');\n\nfunction shiftMix(val: Long): Long {\n return val.xor(val.shru(47));\n}\n\nfunction fetch(s: Uint8Array, offset: number, numBytes: number): Long {\n const bytes = s.slice(offset, offset + numBytes);\n return Long.fromBytes(Array.from(bytes), true, true);\n}\n\nfunction fetch64(s: Uint8Array, offset: number): Long {\n return fetch(s, offset, 8);\n}\n\nfunction fetch32(s: Uint8Array, offset: number): Long {\n return fetch(s, offset, 4);\n}\n\nfunction rotate64(val: Long, shift: number): Long {\n // Avoid shifting by 64: doing so yields an 
undefined result.\n return shift === 0 ? val : val.shru(shift).or(val.shl(64 - shift));\n}\n\nfunction hashLen16(u: Long, v: Long, mul = hexToLong('9ddfea08eb382d69')) {\n // Murmur-inspired hashing.\n let a = u.xor(v).mul(mul);\n a = a.xor(a.shru(47));\n let b = v.xor(a).mul(mul);\n b = b.xor(b.shru(47));\n b = b.mul(mul);\n return b;\n}\n\n// Return a 16-byte hash for 48 bytes. Quick and dirty.\n// Callers do best to use \"random-looking\" values for a and b.\nfunction weakHashLen32WithSeeds(\n w: Long, x: Long, y: Long, z: Long, a: Long, b: Long) {\n a = a.add(w);\n b = rotate64(b.add(a).add(z), 21);\n const c = a;\n a = a.add(x);\n a = a.add(y);\n b = b.add(rotate64(a, 44));\n return [a.add(z), b.add(c)];\n}\n\nfunction weakHashLen32WithSeedsStr(\n s: Uint8Array, offset: number, a: Long, b: Long) {\n return weakHashLen32WithSeeds(\n fetch64(s, offset), fetch64(s, offset + 8), fetch64(s, offset + 16),\n fetch64(s, offset + 24), a, b);\n}\n\nfunction hashLen0to16(s: Uint8Array, len = s.length): Long {\n if (len >= 8) {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).add(k2);\n const b = fetch64(s, len - 8);\n const c = rotate64(b, 37).mul(mul).add(a);\n const d = rotate64(a, 25).add(b).mul(mul);\n return hashLen16(c, d, mul);\n }\n if (len >= 4) {\n const mul = k2.add(len * 2);\n const a = fetch32(s, 0);\n return hashLen16(a.shl(3).add(len), fetch32(s, len - 4), mul);\n }\n if (len > 0) {\n const a = s[0];\n const b = s[len >> 1];\n const c = s[len - 1];\n const y = a + (b << 8);\n const z = len + (c << 2);\n return shiftMix(k2.mul(y).xor(k0.mul(z))).mul(k2);\n }\n return k2;\n}\n\nfunction hashLen17to32(s: Uint8Array, len = s.length): Long {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).mul(k1);\n const b = fetch64(s, 8);\n const c = fetch64(s, len - 8).mul(mul);\n const d = fetch64(s, len - 16).mul(k2);\n return hashLen16(\n rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d),\n a.add(rotate64(b.add(k2), 18)).add(c), mul);\n}\n\nfunction hashLen33to64(s: Uint8Array, len = s.length): Long {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).mul(k2);\n const b = fetch64(s, 8);\n const c = fetch64(s, len - 8).mul(mul);\n const d = fetch64(s, len - 16).mul(k2);\n const y = rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d);\n const z = hashLen16(y, a.add(rotate64(b.add(k2), 18)).add(c), mul);\n const e = fetch64(s, 16).mul(mul);\n const f = fetch64(s, 24);\n const g = y.add(fetch64(s, len - 32)).mul(mul);\n const h = z.add(fetch64(s, len - 24)).mul(mul);\n return hashLen16(\n rotate64(e.add(f), 43).add(rotate64(g, 30)).add(h),\n e.add(rotate64(f.add(a), 18)).add(g), mul);\n}\n\nexport function fingerPrint64(s: Uint8Array, len = s.length): Long {\n const seed: Long = Long.fromNumber(81, true);\n if (len <= 32) {\n if (len <= 16) {\n return hashLen0to16(s, len);\n } else {\n return hashLen17to32(s, len);\n }\n } else if (len <= 64) {\n return hashLen33to64(s, len);\n }\n\n // For strings over 64 bytes we loop. 
Internal state consists of\n // 56 bytes: v, w, x, y, and z.\n let x = seed;\n let y = seed.mul(k1).add(113);\n\n let z = shiftMix(y.mul(k2).add(113)).mul(k2);\n let v = [Long.UZERO, Long.UZERO];\n let w = [Long.UZERO, Long.UZERO];\n x = x.mul(k2).add(fetch64(s, 0));\n\n let offset = 0;\n // Set end so that after the loop we have 1 to 64 bytes left to process.\n const end = ((len - 1) >> 6) * 64;\n const last64 = end + ((len - 1) & 63) - 63;\n\n do {\n x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(k1);\n y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(k1);\n x = x.xor(w[1]);\n y = y.add(v[0]).add(fetch64(s, offset + 40));\n z = rotate64(z.add(w[0]), 33).mul(k1);\n v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(k1), x.add(w[0]));\n w = weakHashLen32WithSeedsStr(\n s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));\n\n [z, x] = [x, z];\n offset += 64;\n } while (offset !== end);\n const mul = k1.add(z.and(0xff).shl(1));\n // Point to the last 64 bytes of input.\n offset = last64;\n\n w[0] = w[0].add((len - 1) & 63);\n v[0] = v[0].add(w[0]);\n w[0] = w[0].add(v[0]);\n\n x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(mul);\n y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(mul);\n x = x.xor(w[1].mul(9));\n y = y.add(v[0].mul(9).add(fetch64(s, offset + 40)));\n z = rotate64(z.add(w[0]), 33).mul(mul);\n v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(mul), x.add(w[0]));\n w = weakHashLen32WithSeedsStr(\n s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));\n\n [z, x] = [x, z];\n\n return hashLen16(\n hashLen16(v[0], w[0], mul).add(shiftMix(y).mul(k0)).add(z),\n hashLen16(v[1], w[1], mul).add(x), mul);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimer, BackendTimingInfo} from './backends/backend';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport {DataType, DataTypeMap, TypedArray} from './types';\nimport * as util from './util';\n\nexport type KernelProfile = {\n kernelName: string,\n outputs: Tensor[],\n inputs: NamedTensorMap,\n timeMs: Promise,\n extraInfo: Promise\n};\n\nexport class Profiler {\n constructor(private backendTimer: BackendTimer, private logger?: Logger) {\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n\n profileKernel(kernelName: string, inputs: NamedTensorMap, f: () => Tensor[]):\n KernelProfile {\n let outputs: Tensor[];\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n let timer: Promise;\n const start = util.now();\n if (this.backendTimer.timerAvailable()) {\n timer = this.backendTimer.time(holdResultWrapperFn);\n } else {\n holdResultWrapperFn();\n for (const output of outputs) {\n output.dataSync();\n }\n timer = Promise.resolve({kernelMs: util.now() - start});\n }\n if 
(env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n }\n\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(\n timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n\n logKernelProfile(kernelProfile: KernelProfile): void {\n const {kernelName, outputs, timeMs, inputs, extraInfo} = kernelProfile;\n\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(\n kernelName, result, valueContainer[0], valueContainer[1], inputs,\n valueContainer[2]);\n });\n });\n }\n}\n\nexport function checkComputationForErrors(\n vals: DataTypeMap[D], dtype: D, kernelName: string): boolean {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\n\nexport class Logger {\n logKernelProfile(\n name: string, result: Tensor, vals: TypedArray,\n timeMs: number|{error: string}, inputs: NamedTensorMap,\n extraInfo?: string) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n\n console.log(\n `%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${\n inputShapesDescription}\\t%c${extraInfo}`,\n 'font-weight:bold', 'color:red', 'color:blue', 'color: orange',\n 'color: green', 'color: steelblue');\n }\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport * as util from './util';\n\nexport interface TapeNode {\n id: number;\n kernelName: string;\n outputs: Tensor[];\n inputs: NamedTensorMap;\n // Optional params, defined only for ops with gradient impl.\n gradient?: (dys: Tensor[]) => NamedGradientMap;\n saved?: Tensor[];\n}\n\nexport type NamedGradientMap = {\n [inputName: string]: () => Tensor;\n};\n\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(\n tape: TapeNode[], xs: Tensor[], y: Tensor): TapeNode[] {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX: {[tensorId: number]: boolean} = {};\n const nodesFromX: {[nodeId: number]: boolean} = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n\n if (anyInputFromX) {\n break;\n }\n }\n }\n\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY: {[tensorId: number]: boolean} = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY: {[nodeId: number]: boolean} = {};\n\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n\n // Return the paths that come from x and lead to y.\n const filteredTape: TapeNode[] = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs: {[inputName: string]: Tensor} = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n 
prunedNode.outputs = node.outputs;\n\n filteredTape.push(prunedNode);\n }\n }\n\n return filteredTape;\n}\n\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(\n tensorAccumulatedGradientMap: {[tensorId: number]: Tensor},\n filteredTape: TapeNode[], tidy: (f: Function) => Tensor,\n add: (a: Tensor, b: Tensor) => Tensor) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n\n const dys: Tensor[] = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n } else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n\n if (node.gradient == null) {\n throw new Error(\n `Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(\n `Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n } else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TypedArray} from './types';\nimport {computeStrides, isString, rightPad, sizeFromShape} from './util';\n\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\n\nexport function tensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n verbose: boolean) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\n\nfunction computeMaxSizePerColumn(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[]): number[] {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples =\n dtype === 'complex64' ? createComplexTuples(vals) : vals;\n\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(\n padPerCol[j],\n valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\n\nfunction valToString(\n val: number|string|[number, number], pad: number, dtype: DataType) {\n let valStr: string;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n } else if (isString(val)) {\n valStr = `'${val}'`;\n } else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n } else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n\n return rightPad(valStr, pad);\n}\n\nfunction boolNumToString(v: number): string {\n return v === 0 ? 'false' : 'true';\n}\n\nfunction subTensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[], padPerCol: number[], isLast = true): string[] {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0] as number)];\n }\n return [vals[0].toString()];\n }\n\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n\n let firstVals = Array.from(\n vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice(\n (size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement,\n size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map(\n (x, i) => valToString(\n x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals: Array =\n dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines: string[] = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n } else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\n\nfunction createComplexTuples(vals: Array<{}>|\n TypedArray): Array<[number, number]> {\n const complexTuples: Array<[number, number]> = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]] as [number, number]);\n }\n return complexTuples;\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlobal} from './global_util';\nimport {tensorToString} from './tensor_format';\nimport {ArrayMap, BackendValues, DataType, DataTypeMap, DataValues, NumericDataType, Rank, ShapeMap, SingleValueMap, TypedArray} from './types';\nimport * as util from './util';\nimport {computeStrides, toNestedArray} from './util';\n\nexport interface TensorData {\n dataId?: DataId;\n values?: DataTypeMap[D];\n}\n\n// This interface mimics KernelBackend (in backend.ts), which would create a\n// circular dependency if imported.\nexport interface Backend {}\n\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n size: number;\n shape: ShapeMap[R];\n strides: number[];\n values: DataTypeMap[D];\n\n constructor(shape: ShapeMap[R], public dtype: D, values?: DataTypeMap[D]) {\n this.shape = shape.slice() as ShapeMap[R];\n this.size = util.sizeFromShape(shape);\n\n if (values != null) {\n const n = values.length;\n util.assert(\n n === this.size,\n () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value: SingleValueMap[D], ...locs: number[]): void {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(\n locs.length === this.rank,\n () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n\n const index = this.locToIndex(locs);\n this.values[index] = value as number;\n }\n\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs: number[]): SingleValueMap[D] {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index] as SingleValueMap[D];\n }\n\n locToIndex(locs: number[]): number {\n if (this.rank === 0) {\n return 0;\n } else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n\n indexToLoc(index: number): number[] {\n if (this.rank === 0) {\n return [];\n } else if (this.rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n\n get rank() {\n return this.shape.length;\n }\n\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor(): Tensor {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype) as\n Tensor;\n }\n}\n\nexport interface TensorTracker {\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: Backend): Tensor;\n makeVariable(\n initialValue: Tensor, trainable?: boolean, name?: string,\n dtype?: DataType): Variable;\n incRef(a: Tensor, backend: Backend): void;\n disposeTensor(t: Tensor): void;\n disposeVariable(v: Variable): void;\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n}\n\n/**\n * The Tensor class calls into this handler to delegate chaining operations.\n */\nexport interface OpHandler {\n cast(x: T, dtype: DataType): T;\n buffer(\n shape: ShapeMap[R], dtype: D,\n values?: DataTypeMap[D]): TensorBuffer;\n print(x: T, verbose: boolean): void;\n clone(x: T): T;\n // TODO(yassogba) bring reshape back?\n}\n\n// For tracking tensor creation and disposal.\nlet trackerFn: () => TensorTracker = null;\n// Used by chaining methods to call into ops.\nlet opHandler: OpHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn: (msg: string) => void = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn: () => TensorTracker) {\n trackerFn = fn;\n}\n\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler: OpHandler) {\n opHandler = handler;\n}\n\n/**\n * Sets the deprecation warning function to be used by this file. 
This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn: (msg: string) => void) {\n deprecationWarningFn = fn;\n}\n\n/**\n * We wrap data id since we use weak map to avoid memory leaks.\n * Since we have our own memory management, we have a reference counter\n * mapping a tensor to its data, so there is always a pointer (even if that\n * data is otherwise garbage collectable).\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/\n * Global_Objects/WeakMap\n */\nexport type DataId = object; // object instead of {} to force non-primitive.\n\n// Declare this namespace to make Tensor class augmentation work in google3.\nexport declare namespace Tensor {}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * For performance reasons, functions that create tensors do not necessarily\n * perform a copy of the data passed to them (e.g. if the data is passed as a\n * `Float32Array`), and changes to the data will change the tensor. This is not\n * a feature and is not supported. To avoid this behavior, use the tensor before\n * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n /** Unique id of this tensor. */\n readonly id: number;\n /**\n * Id of the bucket holding the data for this tensor. Multiple arrays can\n * point to the same bucket (e.g. when calling array.reshape()).\n */\n dataId: DataId;\n /** The shape of the tensor. */\n readonly shape: ShapeMap[R];\n /** Number of elements in the tensor. */\n readonly size: number;\n /** The data type for the array. */\n readonly dtype: DataType;\n /** The rank type for the array (see `Rank` enum). */\n readonly rankType: R;\n\n /** Whether this tensor has been globally kept. */\n kept = false;\n /** The id of the scope this tensor is being tracked in. */\n scopeId: number;\n\n /**\n * Number of elements to skip in each dimension when indexing. See\n * https://docs.scipy.org/doc/numpy/reference/generated/\\\n * numpy.ndarray.strides.html\n */\n readonly strides: number[];\n\n constructor(shape: ShapeMap[R], dtype: DataType, dataId: DataId, id: number) {\n this.shape = shape.slice() as ShapeMap[R];\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher') as R;\n }\n\n get rank(): number {\n return this.shape.length;\n }\n\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer(): Promise> {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype as D, vals);\n }\n\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync(): TensorBuffer {\n return opHandler.buffer(this.shape, this.dtype as D, this.dataSync());\n }\n\n /**\n * Returns the tensor data as a nested array. 
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array(): Promise {\n const vals = await this.data();\n return toNestedArray(this.shape, vals, this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync(): ArrayMap[R] {\n return toNestedArray(\n this.shape, this.dataSync(), this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data(): Promise {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data as Uint8Array[];\n try {\n return bytes.map(b => util.decodeString(b)) as DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as Promise;\n }\n\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync(): DataTypeMap[D] {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return (data as Uint8Array[]).map(b => util.decodeString(b)) as\n DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as DataTypeMap[D];\n }\n\n /** Returns the underlying bytes of the tensor's data. */\n async bytes(): Promise {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data as Uint8Array[];\n } else {\n return new Uint8Array((data as TypedArray).buffer);\n }\n }\n\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose(): void {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n\n protected isDisposedInternal = false;\n get isDisposed(): boolean {\n return this.isDisposedInternal;\n }\n\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false): void {\n return opHandler.print(this, verbose);\n }\n\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone(this: T): T {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n\n /**\n * Returns a human-readable description of the tensor. 
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false): string {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n\n cast(dtype: DataType): T {\n this.throwIfDisposed();\n return opHandler.cast(this as T, dtype);\n }\n variable(trainable = true, name?: string, dtype?: DataType): Variable {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype) as\n Variable;\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance: Tensor) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n\nexport function getGlobalTensorClass() {\n // Use getGlobal so that we can augment the Tensor class across package\n // boundaries becase the node resolution alg may result in different modules\n // being returned for this file depending on the path they are loaded from.\n return getGlobal('Tensor', () => {\n return Tensor;\n });\n}\n\n// Global side effect. Cache global reference to Tensor class\ngetGlobalTensorClass();\n\nexport interface NumericTensor extends Tensor {\n dtype: NumericDataType;\n dataSync(): DataTypeMap[D];\n data(): Promise;\n}\n\nexport interface StringTensor extends Tensor {\n dtype: 'string';\n dataSync(): DataTypeMap[D];\n data(): Promise;\n}\n\n/** @doclink Tensor */\nexport type Scalar = Tensor;\n/** @doclink Tensor */\nexport type Tensor1D = Tensor;\n/** @doclink Tensor */\nexport type Tensor2D = Tensor;\n/** @doclink Tensor */\nexport type Tensor3D = Tensor;\n/** @doclink Tensor */\nexport type Tensor4D = Tensor;\n/** @doclink Tensor */\nexport type Tensor5D = Tensor;\n/** @doclink Tensor */\nexport type Tensor6D = Tensor;\n\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n name: string;\n\n constructor(\n initialValue: Tensor, public trainable: boolean, name: string,\n tensorId: number) {\n super(\n initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.name = name;\n }\n\n /**\n * Assign a new `tf.Tensor` to this variable. 
The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue: Tensor): void {\n if (newValue.dtype !== this.dtype) {\n throw new Error(\n `dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(\n `shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n\n dispose(): void {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\n\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance: Variable) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {TensorContainer, TensorContainerArray} from './tensor_types';\nimport {upcastType} from './types';\nimport {assert} from './util';\n\nexport function makeTypesMatch(a: T, b: T): [T, T] {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\n\nexport function assertTypesMatch(a: Tensor, b: Tensor): void {\n assert(\n a.dtype === b.dtype,\n () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\n\nexport function isTensorInList(tensor: Tensor, tensorList: Tensor[]): boolean {\n return tensorList.some(x => x.id === tensor.id);\n}\n\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. 
If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result: TensorContainer): Tensor[] {\n const list: Tensor[] = [];\n const seen = new Set<{}|void>();\n walkTensorContainer(result, list, seen);\n return list;\n}\n\nfunction walkTensorContainer(\n container: TensorContainer, list: Tensor[], seen: Set<{}|void>): void {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container as TensorContainerArray;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n\n// tslint:disable-next-line:no-any\nfunction isIterable(obj: any): boolean {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** @docalias number[] */\nexport interface ShapeMap {\n R0: number[];\n R1: [number];\n R2: [number, number];\n R3: [number, number, number];\n R4: [number, number, number, number];\n R5: [number, number, number, number, number];\n R6: [number, number, number, number, number, number];\n}\n\n/** @docalias number[] */\nexport interface ArrayMap {\n R0: number;\n R1: number[];\n R2: number[][];\n R3: number[][][];\n R4: number[][][][];\n R5: number[][][][][];\n R6: number[][][][][][];\n}\n\nexport interface DataTypeMap {\n float32: Float32Array;\n int32: Int32Array;\n bool: Uint8Array;\n complex64: Float32Array;\n string: string[];\n}\n\nexport interface SingleValueMap {\n bool: boolean;\n int32: number;\n float32: number;\n complex64: number;\n string: string;\n}\n\n/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */\nexport type DataType = keyof DataTypeMap;\nexport type NumericDataType = 'float32'|'int32'|'bool'|'complex64';\nexport type TypedArray = Float32Array|Int32Array|Uint8Array;\n/** Tensor data used in tensor creation and user-facing API. */\nexport type DataValues = DataTypeMap[DataType];\n/** The underlying tensor data that gets stored in a backend. */\nexport type BackendValues = Float32Array|Int32Array|Uint8Array|Uint8Array[];\n\nexport enum Rank {\n R0 = 'R0',\n R1 = 'R1',\n R2 = 'R2',\n R3 = 'R3',\n R4 = 'R4',\n R5 = 'R5',\n R6 = 'R6'\n}\n\nexport type FlatVector = boolean[]|number[]|TypedArray;\nexport type RegularArray =\n T[]|T[][]|T[][][]|T[][][][]|T[][][][][]|T[][][][][][];\n\n// tslint:disable-next-line:no-any\nexport interface RecursiveArray {\n [index: number]: T|RecursiveArray;\n}\n\n// Looks for upcasting types. 
Used, for example, in operations with mixed dtype\n// inputs.\nenum UpcastInt32AndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'int32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastBoolAndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'bool',\n 'complex64' = 'complex64'\n}\n\nenum UpcastFloat32AndMap {\n 'float32' = 'float32',\n 'int32' = 'float32',\n 'bool' = 'float32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastComplex64AndMap {\n 'float32' = 'complex64',\n 'int32' = 'complex64',\n 'bool' = 'complex64',\n 'complex64' = 'complex64'\n}\n\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\n\nexport function upcastType(typeA: DataType, typeB: DataType): DataType {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n\n/** Returns the output type after summation. */\nexport function sumOutType(type: DataType): DataType {\n return upcastType(type, 'int32');\n}\n\n/** @docalias TypedArray|Array */\nexport type TensorLike =\n TypedArray|number|boolean|string|RecursiveArray|\n RecursiveArray|RecursiveArray|Uint8Array[];\nexport type ScalarLike = number|boolean|string|Uint8Array;\n/** @docalias TypedArray|Array */\nexport type TensorLike1D = TypedArray|number[]|boolean[]|string[]|Uint8Array[];\n/** @docalias TypedArray|Array */\nexport type TensorLike2D = TypedArray|number[]|number[][]|boolean[]|boolean[][]|\n string[]|string[][]|Uint8Array[]|Uint8Array[][];\n/** @docalias TypedArray|Array */\nexport type TensorLike3D = TypedArray|number[]|number[][][]|boolean[]|\n boolean[][][]|string[]|string[][][]|Uint8Array[]|Uint8Array[][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike4D = TypedArray|number[]|number[][][][]|boolean[]|\n boolean[][][][]|string[]|string[][][][]|Uint8Array[]|Uint8Array[][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike5D =\n TypedArray|number[]|number[][][][][]|boolean[]|boolean[][][][][]|string[]|\n string[][][][][]|Uint8Array[]|Uint8Array[][][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike6D =\n TypedArray|number[]|number[][][][][][]|boolean[]|boolean[][][][][][]|\n string[]|string[][][][][][]|Uint8Array[]|Uint8Array[][][][][];\n\n/** Type for representing image data in Uint8Array type. */\nexport interface PixelData {\n width: number;\n height: number;\n data: Uint8Array;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimingInfo, DataMover, KernelBackend} from './backends/backend';\nimport {Environment, setEnvironmentGlobal} from './environment';\nimport {getGlobalNamespace} from './global_util';\nimport {Add, Cast, Identity} from './kernel_names';\nimport {getGradient, getKernel, getKernelsForBackend, GradFunc, NamedAttrMap, TensorInfo} from './kernel_registry';\nimport {KernelProfile, Profiler} from './profiler';\nimport {backpropagateGradients, getFilteredNodesXToY, TapeNode} from './tape';\nimport {DataId, setTensorTracker, Tensor, TensorTracker, Variable} from './tensor';\nimport {GradSaveFunc, NamedTensorMap, NamedVariableMap, TensorContainer} from './tensor_types';\nimport {getTensorsInContainer} from './tensor_util';\nimport {BackendValues, DataType, DataValues} from './types';\nimport * as util from './util';\nimport {bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape} from './util';\nimport * as log from './log';\n/**\n * A function that computes an output. The save function is for saving tensors\n * computed in the forward pass, that we need in the backward pass.\n */\nexport type ForwardFunc = (backend: KernelBackend, save?: GradSaveFunc) => T;\n\n/**\n * @docalias (a: Tensor, b: Tensor,..., save?: Function) => {\n * value: Tensor,\n * gradFunc: (dy: Tensor, saved?: NamedTensorMap) => Tensor | Tensor[]\n * }\n */\nexport type CustomGradientFunc =\n (...inputs: Array) => {\n value: T;\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[];\n };\n\nexport type MemoryInfo = {\n numTensors: number; numDataBuffers: number; numBytes: number;\n unreliable?: boolean; reasons: string[];\n};\n\ntype KernelInfo = {\n name: string; bytesAdded: number; totalBytesSnapshot: number;\n tensorsAdded: number;\n totalTensorsSnapshot: number;\n inputShapes: number[][];\n outputShapes: number[][];\n kernelTimeMs: number | {error: string} | Promise;\n extraInfo: string | Promise;\n};\n\nexport type ProfileInfo = {\n newBytes: number; newTensors: number; peakBytes: number;\n kernels: KernelInfo[];\n result: TensorContainer;\n kernelNames: string[];\n};\n\nexport interface TimingInfo extends BackendTimingInfo {\n wallMs: number;\n}\n\n/** @docalias Function */\nexport type ScopeFn = () => T;\n\ninterface ScopeState {\n track: Tensor[];\n name: string;\n id: number;\n}\n\ninterface RegisteredKernelInvocation {\n kernelName: string;\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\ninterface CustomGradKernelInvocation {\n forwardFunc: ForwardFunc;\n backwardsFunc: (dy: T, saved: Tensor[]) => {\n [P in keyof I]: () => I[P]\n };\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\nfunction isRegisteredKernelInvocation(\n kernelInvocation: RegisteredKernelInvocation|\n CustomGradKernelInvocation):\n kernelInvocation is RegisteredKernelInvocation {\n return (kernelInvocation as RegisteredKernelInvocation).kernelName != null;\n}\n\nclass 
EngineState {\n // Public since optimizers will use it.\n registeredVariables: NamedVariableMap = {};\n\n nextTapeNodeId = 0;\n numBytes = 0;\n numTensors = 0;\n numStringTensors = 0;\n numDataBuffers = 0;\n\n activeTape: TapeNode[];\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n kernelDepth = 0;\n\n // Keep Tensors that parallel the tapes.\n activeScope: ScopeState;\n scopeStack: ScopeState[] = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n numDataMovesStack: number[] = [];\n nextScopeId = 0;\n\n tensorInfo = new WeakMap();\n\n profiling = false;\n activeProfile: ProfileInfo = {\n newBytes: 0,\n newTensors: 0,\n peakBytes: 0,\n kernels: [],\n result: null,\n get kernelNames():\n string[] {\n return Array.from(new Set(this.kernels.map(k => k.name)));\n }\n };\n\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\n\nexport class Engine implements TensorTracker, DataMover {\n state: EngineState;\n backendName: string;\n registry: {[id: string]: KernelBackend} = {};\n registryFactory: {\n [id: string]: {\n factory: () => KernelBackend | Promise,\n priority: number\n }\n } = {};\n\n private profiler: Profiler;\n private backendInstance: KernelBackend;\n private pendingBackendInit: Promise;\n private pendingBackendInitId = 0;\n\n constructor(public ENV: Environment) {\n this.state = new EngineState();\n }\n\n async ready(): Promise {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => {});\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n get backend(): KernelBackend {\n if (this.pendingBackendInit != null) {\n throw new Error(\n `Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const {name, asyncInit} = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(\n `The highest priority backend '${name}' has not yet been ` +\n `initialized. 
Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n\n backendNames(): string[] {\n return Object.keys(this.registryFactory);\n }\n\n findBackend(backendName: string): KernelBackend {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const {asyncInit} = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n } else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n\n findBackendFactory(backendName: string):\n () => KernelBackend | Promise {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n\n registerBackend(\n backendName: string,\n factory: () => KernelBackend | Promise,\n priority = 1): boolean {\n if (backendName in this.registryFactory) {\n log.warn(\n `${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = {factory, priority};\n return true;\n }\n\n async setBackend(backendName: string): Promise {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const {success, asyncInit} = this.initializeBackend(backendName);\n const result = asyncInit ? await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n\n return true;\n }\n\n private setupRegisteredKernels(): void {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n\n private disposeRegisteredKernels(backendName: string): void {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n private initializeBackend(backendName: string):\n {success: boolean|Promise, asyncInit: boolean} {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(\n `Cannot initialize backend ${backendName}, no registration found.`);\n }\n\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend) &&\n typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success =\n backend\n .then(backendInstance => {\n // Outdated promise. 
Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n log.warn(\n `Initialization of backend ${backendName} failed`);\n log.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return {success, asyncInit: true};\n } else {\n this.registry[backendName] = backend as KernelBackend;\n return {success: true, asyncInit: false};\n }\n } catch (err) {\n log.warn(`Initialization of backend ${backendName} failed`);\n log.warn(err.stack || err.message);\n return {success: false, asyncInit: false};\n }\n }\n\n removeBackend(backendName: string): void {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n\n delete this.registryFactory[backendName];\n\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n\n private getSortedBackends(): string[] {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a: string, b: string) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n\n private initializeBackendsAndReturnBest():\n {name: string, asyncInit: boolean} {\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const {success, asyncInit} = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return {name: backendName, asyncInit};\n }\n }\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n moveData(backend: KernelBackend, dataId: DataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n const refCount = srcBackend.refCount(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId, true);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype, refCount);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n\n tidy(nameOrFn: string|ScopeFn, fn?: ScopeFn):\n T {\n let name: string = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n } else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error(\n 'When calling with 
two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error(\n 'When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn as string;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result: T;\n return this.scopedRun(\n () => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n\n private scopedRun(start: () => void, end: () => void, f: () => T): T {\n start();\n try {\n const res = f();\n end();\n return res;\n } catch (ex) {\n end();\n throw ex;\n }\n }\n\n private static nextTensorId = 0;\n private nextTensorId(): number {\n return Engine.nextTensorId++;\n }\n\n private static nextVariableId = 0;\n private nextVariableId(): number {\n return Engine.nextVariableId++;\n }\n\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n */\n private clone(x: Tensor): Tensor {\n const y: Tensor = ENGINE.runKernel(Identity, {x} as {} as NamedTensorMap);\n const inputs = {x};\n const grad = (dy: Tensor) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = {x: dy};\n const attrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, gradInputs as {} as NamedTensorMap,\n // tslint:disable-next-line: no-unnecessary-type-assertion\n attrs as {} as NamedAttrMap) as Tensor;\n }\n });\n const saved: Tensor[] = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. 
These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(\n kernelName: string, inputs: NamedTensorMap, attrs?: NamedAttrMap): T {\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n const hasKernel = getKernel(kernelName, this.backendName) != null;\n if (!hasKernel) {\n throw new Error(`Kernel '${kernelName}' not registered for backend '${\n this.backendName}'`);\n }\n return this.runKernelFunc({kernelName, inputs, attrs});\n }\n\n private shouldCheckForMemLeaks(): boolean {\n return this.ENV.getBool('IS_TEST');\n }\n\n private checkKernelForMemLeak(\n kernelName: string, numDataIdsBefore: number,\n outInfos: TensorInfo[]): void {\n const numDataIdsAfter = this.backend.numDataIds();\n\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves =\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked =\n numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(\n `Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n\n /**\n * Internal helper method to execute a kernel Func\n *\n * Use `runKernel` to execute kernels from outside of engine.\n */\n private runKernelFunc(\n kernelParams: RegisteredKernelInvocation|\n CustomGradKernelInvocation): T {\n let outputs: Tensor[];\n let saved: Tensor[] = [];\n const isTapeOn = this.isTapeOn();\n\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n\n let kernelFunc: () => Tensor[];\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n\n let out: TensorInfo|TensorInfo[];\n\n const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?\n kernelParams.kernelName :\n this.state.activeScope != null ? this.state.activeScope.name : '';\n\n // Create the kernelFunc from either a registered kernel OR passed in\n // forward/backward functions (used by custom grad). 
In this context a\n // kernelFunc wraps a kernel implementation with some bookkeeping.\n\n if (isRegisteredKernelInvocation(kernelParams)) {\n const {kernelName, inputs, attrs} = kernelParams;\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n const kernel = getKernel(kernelName, this.backendName);\n util.assert(\n kernel != null,\n () => `Cannot find registered kernel '${kernelName}' for backend '${\n this.backendName}'`);\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({inputs, attrs, backend: this.backend});\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n\n const outTensors = outInfos.map((outInfo: TensorInfo|Tensor) => {\n // todo (yassogba) remove this option (Tensor) when node backend\n // methods have been modularized and they all return tensorInfo.\n // TensorInfos do not have a rank attribute.\n if ((outInfo as Tensor).rank != null) {\n return outInfo as Tensor;\n }\n const {dataId, shape, dtype} = outInfo as TensorInfo;\n return this.makeTensorFromDataId(dataId, shape, dtype);\n });\n\n // Save any required inputs and outputs.\n\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since there would be no backprop for these tensors\n // (which would otherwise dispose them).\n if (isTapeOn) {\n const tensorsToSave =\n this.getTensorsForGradient(kernelName, inputs, outTensors);\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n } else {\n const {forwardFunc} = kernelParams;\n // Running a customGrad op.\n const saveFunc: GradSaveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]) as Tensor[];\n if (this.shouldCheckForMemLeaks()) {\n // Scope name is used to print a more helpful error message if needed.\n this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n\n //\n // Run the kernelFunc. 
Optionally profiling it.\n //\n const {inputs, attrs} = kernelParams;\n const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?\n null :\n kernelParams.backwardsFunc;\n\n let kernelProfile: KernelProfile;\n this.scopedRun(\n // Stop recording to a tape when running a kernel.\n () => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n } else {\n kernelProfile = this.profiler.profileKernel(\n kernelOrScopeName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n\n if (isTapeOn) {\n this.addTapeNode(\n kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelOrScopeName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(\n key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? outputs : outputs[0]) as T;\n }\n\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n private saveTensorsForBackwardMode(tensors: Tensor[]): Tensor[] {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n private getTensorsForGradient(\n kernelName: string, inputs: NamedTensorMap,\n outputs: Tensor[]): Tensor[]|null {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave: string[] = gradConfig.inputsToSave || [];\n const outputsToSave: boolean[] = gradConfig.outputsToSave || [];\n\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave: Tensor[];\n if (gradConfig.saveAllInputs) {\n util.assert(\n Array.isArray(inputs),\n () => 'saveAllInputs is true, expected inputs to be an array.');\n\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n } else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n\n const outputTensorsToSave: Tensor[] =\n outputs.filter((_, i) => outputsToSave[i]);\n\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // We return an empty list rather than throw an error because the kernel we\n // are looking up may not actually be relevant to backproping through the\n // overall function\n //\n // See 'does not error if irrelevant (pruned) ops are missing grads' test\n // in gradients_test.ts for an example.\n return [];\n }\n\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. 
It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values as BackendValues;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = (values as string[]).map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals as Uint8Array[]);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(\n dataId: DataId, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n return t;\n }\n\n makeVariable(\n initialValue: Tensor, trainable = true, name?: string,\n dtype?: DataType): Variable {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n\n trackTensor(a: Tensor, backend: KernelBackend): void {\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.numBytes += bytes;\n\n if (!this.state.tensorInfo.has(a.dataId)) {\n this.state.numDataBuffers++;\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes\n });\n }\n\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n\n // Track the tensor by dataId and increase the refCount for the dataId in the\n // backend.\n // TODO(pyu10055): This is currently used by makeVariable method, to increase\n // refCount on the backend for the dataId. 
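`makeVariable` above is the engine entry point behind the public `tf.variable()` factory: it registers the variable under a unique name and bumps the ref count on the backing data. A short sketch of the public-facing behaviour (the variable name and shapes are illustrative):

```ts
import * as tf from '@tensorflow/tfjs-core';

// Registers the variable under an explicit name; registering a second
// variable with the same name would throw, as enforced in makeVariable above.
const weights = tf.variable(tf.zeros([4, 2]), /* trainable= */ true, 'my/weights');

// assign() keeps the registered variable but swaps its backing data.
weights.assign(tf.ones([4, 2]));

// Variables are not tracked by scopes; they live until disposed explicitly.
weights.dispose();
```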
It can potentially be replaced with\n // Identity op indead of calling backend directly.\n incRef(a: Tensor, backend: KernelBackend): void {\n this.trackTensor(a, backend);\n this.backend.incRef(a.dataId);\n }\n\n removeDataId(dataId: DataId, backend: KernelBackend) {\n if (this.state.tensorInfo.has(dataId) &&\n this.state.tensorInfo.get(dataId).backend === backend) {\n this.state.tensorInfo.delete(dataId);\n this.state.numDataBuffers--;\n }\n }\n disposeTensor(a: Tensor): void {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n this.state.numBytes -= info.bytes;\n }\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n const bytes = a.size * util.bytesPerElement(a.dtype);\n this.state.numBytes -= bytes;\n }\n\n // Remove the reference to dataId if backend dispose the data successfully\n if (info.backend.disposeData(a.dataId)) {\n this.removeDataId(a.dataId, info.backend);\n }\n\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. Creating a stack trace is too expensive\n // to do unconditionally.\n }\n\n disposeVariables(): void {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n\n disposeVariable(v: Variable): void {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n\n memory(): MemoryInfo {\n const info = this.backend.memory() as MemoryInfo;\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push(\n 'Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n\n async profile(query: () => (TensorContainer | Promise)):\n Promise {\n this.state.profiling = true;\n\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n\n this.state.profiling = false;\n\n this.state.activeProfile.peakBytes = Math.max(\n ...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n\n isTapeOn(): boolean {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n\n private addTapeNode(\n kernelName: string, inputs: NamedTensorMap, outputs: Tensor[],\n gradientsFunc: GradFunc, saved: Tensor[], attrs: NamedAttrMap): void {\n const tapeNode: TapeNode =\n {id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved};\n\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys: Tensor[]) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are 
not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n\n keep(result: T): T {\n result.kept = true;\n return result;\n }\n\n private startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n\n private endTape() {\n this.state.gradientDepth--;\n }\n\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name?: string) {\n const scopeInfo: ScopeState = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n\n /**\n * End a scope. Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result?: TensorContainer) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet =\n new Set(tensorsToTrackInParent.map(t => t.id));\n\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(\n f: () => T, xs: Tensor[], dy?: T,\n allowNoGradients = false): {value: T, grads: Tensor[]} {\n util.assert(\n xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n\n const y = this.scopedRun(\n () => this.startTape(), () => this.endTape(),\n () => this.tidy('forward', f));\n\n util.assert(\n y instanceof Tensor,\n () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error(\n 'Cannot compute gradient of y=f(x) with respect to x. 
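The scope machinery above (`startScope`/`endScope`, `track`, `keep`) together with the byte/tensor counters is surfaced publicly through `tf.tidy()`, `tf.keep()` and `tf.memory()`. A small sketch of how intermediates get reclaimed (shapes are illustrative):

```ts
import * as tf from '@tensorflow/tfjs-core';

let kept: tf.Tensor;
const result = tf.tidy(() => {
  const a = tf.randomNormal([100, 100]);
  const b = a.square();          // intermediate: disposed when the scope ends
  kept = tf.keep(b.sum());       // kept: survives endScope() above
  return b.mean();               // returned: tracked in the parent scope
});

// numTensors reflects the engine's state.numTensors bookkeeping; of the
// tensors created above, only `result` and `kept` are still alive.
console.log(tf.memory().numTensors);

result.dispose();
kept.dispose();
```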
Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n\n return this.tidy('backward', () => {\n const accumulatedGradientMap: {[tensorId: number]: Tensor} = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(\n accumulatedGradientMap, filteredTape,\n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f as ScopeFn),\n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return {value: y, grads};\n });\n }\n\n customGrad(f: CustomGradientFunc):\n (...args: Array) => T {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs: Tensor[]): T => {\n util.assert(\n inputs.every(t => t instanceof Tensor),\n () => 'The args passed in customGrad(f)(x1, x2,...) must all be ' +\n 'tensors');\n\n let res: {\n value: T,\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[],\n };\n const inputMap: NamedTensorMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n\n const forwardFunc: ForwardFunc = (_, save) => {\n res = f(...[...inputs, save]);\n util.assert(\n res.value instanceof Tensor,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(\n util.isFunction(res.gradFunc),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n };\n\n const backwardsFunc = (dy: T, saved: Tensor[]) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads: Tensor[] = Array.isArray(gradRes) ? 
gradRes : [gradRes];\n util.assert(\n grads.length === inputs.length,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(\n grads.every(t => t instanceof Tensor),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap: {[key: string]: () => Tensor} = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n };\n\n return this.runKernelFunc({\n forwardFunc,\n backwardsFunc,\n inputs: inputMap,\n });\n };\n }\n\n readSync(dataId: DataId): BackendValues {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId: DataId): Promise {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n\n async time(query: () => void): Promise {\n const start = now();\n const timingInfo = await this.backend.time(query) as TimingInfo;\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n private track(result: T): T {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n\n return result;\n }\n\n get registeredVariables(): NamedVariableMap {\n return this.state.registeredVariables;\n }\n\n /**\n * Resets the engine state. Removes all backends but does not remove\n * registered backend factories.\n */\n reset(): void {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\n\nfunction ones(shape: number[]): Tensor {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\n\nexport function getOrMakeEngine(): Engine {\n const ns = getGlobalNamespace() as {} as {_tfengine: Engine};\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\n\nexport const ENGINE = getOrMakeEngine();\n\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a: Tensor, b: Tensor): Tensor {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = {a, b};\n return ENGINE.runKernel(Add, inputs as {} as NamedTensorMap);\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
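The tape plumbing and `customGrad` wiring above are what `tf.grad()` and `tf.customGrad()` build on. A short sketch against the public API; the clipping gradient shown is just an example, not a library-defined op:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Plain gradient: the forward pass is recorded on the tape, then
// backpropagated through the filtered tape nodes. d/dx sum(x^2) = 2x.
const df = tf.grad(x => x.square().sum());
df(tf.tensor1d([2, 3])).print();  // [4, 6]

// customGrad: save() corresponds to saveFunc above, and gradFunc must
// return one gradient per input.
const clipBackward = tf.customGrad((x, save) => {
  const $x = x as tf.Tensor;
  const saveFn = save as (tensors: tf.Tensor[]) => void;
  saveFn([$x]);
  return {
    value: $x.clipByValue(-1, 1),
    // Pass the upstream gradient through only where the input was in range.
    gradFunc: (dy: tf.Tensor, saved: tf.Tensor[]) =>
        [dy.mul(saved[0].abs().lessEqual(1).cast('float32'))],
  };
});
tf.grad(x => clipBackward(x).sum())(tf.tensor1d([0.5, 2])).print();  // [1, 0]
```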
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined(): boolean {\n return typeof navigator !== 'undefined' && navigator != null;\n}\n\nexport function isMobile(nav?: Navigator): boolean {\n if (nav || _isNavigatorDefined()) {\n if (!nav) {\n nav = navigator;\n }\n if (nav.product === 'ReactNative') {\n return true;\n }\n\n // tslint:disable-next-line:no-any\n const a = nav.userAgent || nav.vendor ||\n (typeof window !== 'undefined' ? (window as any).opera : '');\n // Use `navigator.userAgentData.mobile` as fallback.\n if (!a) {\n // tslint:disable-next-line:no-any\n const navAny = nav as any;\n return navAny.userAgentData && navAny.userAgentData.mobile;\n }\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\n\nexport function isBrowser(): boolean {\n 
return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\n\nimport * as device_util from './device_util';\nimport {env} from './environment';\n\nconst ENV = env();\n\n/**\n * This file contains environment-related flag registrations.\n */\n\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn(\n 'Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag(\n 'IS_NODE',\n () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n\n/** Whether this browser is Chrome. */\nENV.registerFlag(\n 'IS_CHROME',\n () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag(\n 'TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n\n/** Whether to check computation result for errors. */\nENV.registerFlag('CHECK_COMPUTATION_FOR_ERRORS', () => true);\n\n/** Whether the backend needs to wrap input to imageBitmap. */\nENV.registerFlag('WRAP_TO_IMAGEBITMAP', () => false);\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
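The flags registered above are read and overridden through `tf.env()`. A minimal sketch using only flag names registered in this file:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Reading a flag evaluates its registered default the first time.
console.log(tf.env().getBool('IS_BROWSER'));
console.log(tf.env().getBool('IS_NODE'));

// Overriding DEBUG runs the hook registered above, which warns about the
// performance cost of downloading and NaN-checking every result.
tf.env().set('DEBUG', true);
console.log(tf.env().getBool('DEBUG'));  // true
```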
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from './engine';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {DataType, TensorLike} from './types';\nimport {assert, flatten, inferDtype, isTypedArray, toTypedArray} from './util';\n\nexport function inferShape(val: TensorLike, dtype?: DataType): number[] {\n let firstElem: typeof val = val;\n\n if (isTypedArray(val)) {\n return dtype === 'string' ? [] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape: number[] = [];\n\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n\n return shape;\n}\n\nfunction deepAssertShapeConsistency(\n val: TensorLike, shape: number[], indices: number[]) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(\n shape.length === 0,\n () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(\n shape.length > 0,\n () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(\n val.length === shape[0],\n () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\n\nfunction assertDtype(\n expectedDtype: DataType|'numeric'|'string_or_numeric',\n actualDType: DataType, argName: string, functionName: string) {\n if (expectedDtype === 'string_or_numeric') {\n return;\n }\n if (expectedDtype == null) {\n throw new Error(`Expected dtype cannot be null.`);\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\n\nexport function convertToTensor(\n x: T|TensorLike, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype as DataType;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n\n if ((x == null) ||\n 
(!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 'null' : (x as {}).constructor.name;\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x] as number[];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype as DataType) :\n flatten(x as string[], [], skipTypedArray) as string[];\n return ENGINE.makeTensor(values, inferredShape, inferredDtype) as T;\n}\n\nexport function convertToTensorArray(\n arg: Array, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T[] {\n if (!Array.isArray(arg)) {\n throw new Error(\n `Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg as T[];\n return tensors.map(\n (t, i) =>\n convertToTensor(t, `${argName}[${i}]`, functionName, parseAsDtype));\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {isPromise} from '../util';\n\nexport const OP_SCOPE_SUFFIX = '__op';\n\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f: {[name: string]: T}): T {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(\n `Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n\n let opName = keys[0];\n const fn = f[opName];\n\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n\n // tslint:disable-next-line:no-any\n const f2 = (...args: any[]) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (isPromise(result)) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n } catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', {value: opName, configurable: true});\n\n // tslint:disable-next-line:no-any\n return f2 as any as T;\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
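The `op()` wrapper above is the pattern tfjs-core uses to define its public ops: the implementation ends in an underscore, runs inside a named scope, and the `__op` suffix distinguishes op scopes from kernels in `tf.profile()`. A sketch of that internal pattern, assuming the same relative import paths as the sources shown here; `myDouble` is a made-up op, not part of the library:

```ts
import {Tensor} from '../tensor';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';

import {mul} from './mul';
import {op} from './operation';

/** Doubles every element of `x` (illustrative only). */
function myDouble_(x: Tensor|TensorLike): Tensor {
  const $x = convertToTensor(x, 'x', 'myDouble');
  // Everything allocated here is tracked in the 'myDouble__op' scope and
  // cleaned up by ENGINE.endScope() once the wrapped function returns.
  return mul($x, 2);
}

// op() strips the trailing underscore and appends OP_SCOPE_SUFFIX ('__op').
export const myDouble = op({myDouble_});
```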
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Complex, ComplexInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real: T|TensorLike, imag: T|TensorLike): T {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch(\n $real.shape, $imag.shape,\n `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n\n const inputs: ComplexInputs = {real: $real, imag: $imag};\n return ENGINE.runKernel(Complex, inputs as {} as NamedTensorMap);\n}\n\nexport const complex = op({complex_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {TensorLike, TypedArray} from '../types';\nimport {DataType} from '../types';\nimport {assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray} from '../util';\n\n/** This is shared code across all tensor creation methods. 
*/\nexport function makeTensor(\n values: TensorLike, shape: number[], inferredShape: number[],\n dtype?: DataType): Tensor {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `Cannot construct a complex64 tensor directly. ` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error(\n 'values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(\n providedSize === inferredSize,\n () =>\n `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(\n inferredShape[i] === shape[i] || !flatDimsDontMatch,\n () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values] as number[];\n }\n\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values as string[], [], true) as string[];\n return ENGINE.makeTensor(values as TypedArray, shape, dtype);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {DataType, Rank, ShapeMap} from '../types';\n\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. 
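The size and shape checks in the shared `makeTensor` helper above are what produce the familiar error when an explicit shape disagrees with the flattened values. A quick illustration via the public `tf.tensor()`:

```ts
import * as tf from '@tensorflow/tfjs-core';

// 4 values reshaped into [2, 2]: provided and inferred sizes agree.
tf.tensor([1, 2, 3, 4], [2, 2]).print();

// 3 values cannot fill a [2, 2] tensor, so makeTensor throws
// "Based on the provided shape, [2,2], the tensor should have 4 values ...".
try {
  tf.tensor([1, 2, 3], [2, 2]);
} catch (e) {
  console.log((e as Error).message);
}
```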
If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(\n values: TensorLike, shape?: ShapeMap[R], dtype?: DataType): Tensor {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype) as Tensor;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/* Type definitions for exporting and importing of models. */\n\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP: {[dtype: string]: number} = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n\n/**\n * A weight manifest.\n *\n * The weight manifest consists of an ordered list of weight-manifest groups.\n * Each weight-manifest group (\"group\" for short hereafter) consists of a\n * number of weight values stored in a number of paths.\n * See the documentation of `WeightManifestGroupConfig` below for more details.\n */\nexport declare type WeightsManifestConfig = WeightsManifestGroupConfig[];\n\n/**\n * A weight-manifest group.\n *\n * Consists of an ordered list of weight values encoded in binary format,\n * stored in an ordered list of paths.\n */\nexport declare interface WeightsManifestGroupConfig {\n /**\n * An ordered list of paths.\n *\n * Paths are intentionally abstract in order to be general. 
For example, they\n * can be relative URL paths or relative paths on the file system.\n */\n paths: string[];\n\n /**\n * Specifications of the weights stored in the paths.\n */\n weights: WeightsManifestEntry[];\n}\n\n/**\n * Group to which the weight belongs.\n *\n * - 'optimizer': Weight from a stateful optimizer.\n */\nexport type WeightGroup = 'model'|'optimizer';\n\n/**\n * An entry in the weight manifest.\n *\n * The entry contains specification of a weight.\n */\nexport declare interface WeightsManifestEntry {\n /**\n * Name of the weight, e.g., 'Dense_1/bias'\n */\n name: string;\n\n /**\n * Shape of the weight.\n */\n shape: number[];\n\n /**\n * Data type of the weight.\n */\n dtype: 'float32'|'int32'|'bool'|'string'|'complex64';\n\n /**\n * Type of the weight.\n *\n * Optional.\n *\n * The value 'optimizer' indicates the weight belongs to an optimizer\n * (i.e., used only during model training and not during inference).\n */\n group?: WeightGroup;\n\n /**\n * Information for dequantization of the weight.\n */\n quantization?: {\n scale?: number, // The scaling constant to multiply by.\n min?: number, // The (possibly nudged) minimum weight to add.\n dtype: 'uint16'|'uint8'|'float16' // The dtype of the quantized weights.\n };\n}\n\n/**\n * Options for saving a model.\n * @innamespace io\n */\nexport interface SaveConfig {\n /**\n * Whether to save only the trainable weights of the model, ignoring the\n * non-trainable ones.\n */\n trainableOnly?: boolean;\n\n /**\n * Whether the optimizer will be saved (if exists).\n *\n * Default: `false`.\n */\n includeOptimizer?: boolean;\n}\n\n/**\n * Result of a saving operation.\n */\nexport interface SaveResult {\n /**\n * Information about the model artifacts saved.\n */\n modelArtifactsInfo: ModelArtifactsInfo;\n\n /**\n * HTTP responses from the server that handled the model-saving request (if\n * any). This is applicable only to server-based saving routes.\n */\n responses?: Response[];\n\n /**\n * Error messages and related data (if any).\n */\n errors?: Array<{}|string>;\n}\n\nexport declare interface ModelArtifactsInfo {\n /**\n * Timestamp for when the model is saved.\n */\n dateSaved: Date;\n\n /**\n * TODO (cais,yassogba) consider removing GraphDef as GraphDefs now\n * come in a JSON format and none of our IOHandlers support a non json\n * format. We could conder replacing this with 'Binary' if we want to\n * allow future handlers to save to non json formats (though they will\n * probably want more information than 'Binary').\n * Type of the model topology\n *\n * Type of the model topology\n *\n * Possible values:\n * - JSON: JSON config (human-readable, e.g., Keras JSON).\n * - GraphDef: TensorFlow\n * [GraphDef](https://www.tensorflow.org/extend/tool_developers/#graphdef)\n * protocol buffer (binary).\n */\n modelTopologyType: 'JSON'|'GraphDef';\n\n /**\n * Size of model topology (Keras JSON or GraphDef), in bytes.\n */\n modelTopologyBytes?: number;\n\n /**\n * Size of weight specification or manifest, in bytes.\n */\n weightSpecsBytes?: number;\n\n /**\n * Size of weight value data, in bytes.\n */\n weightDataBytes?: number;\n}\n\n/** Model training configuration. */\nexport declare interface TrainingConfig {\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n // See\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tfjs-layers/blob/master/src/keras_format/training_config.ts\n /** Optimizer used for the model training. 
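Putting the two interfaces above together, a weights manifest is just an ordered list of groups, each pairing shard paths with the specs of the weights stored in them. A minimal hand-written example (paths, weight names and shapes are illustrative):

```ts
// One group, one shard. The shard's byte length is the sum of
// sizeFromShape(shape) * DTYPE_VALUE_SIZE_MAP[dtype] over its weights
// (784*10*4 + 10*4 bytes here).
const weightsManifest = [{
  paths: ['group1-shard1of1.bin'],
  weights: [
    {name: 'dense/kernel', shape: [784, 10], dtype: 'float32'},
    {name: 'dense/bias', shape: [10], dtype: 'float32'},
  ],
}];
```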
*/\n optimizer_config: {};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Loss function(s) for the model's output(s). */\n loss: string|string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Metric function(s) for the model's output(s). */\n metrics?: string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n weighted_metrics?: string[];\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n sample_weight_mode?: string;\n\n loss_weights?: number[]|{[key: string]: number};\n}\n\n/**\n * The serialized artifacts of a model, including topology and weights.\n *\n * The `modelTopology`, `trainingConfig`, `weightSpecs` and `weightData` fields\n * of this interface are optional, in order to support topology- or weights-only\n * saving and loading.\n *\n * Note this interface is used internally in IOHandlers. For the file format\n * written to disk as `model.json`, see `ModelJSON`.\n */\nexport declare interface ModelArtifacts {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology?: {}|ArrayBuffer;\n\n /**\n * Serialized configuration for the model's training.\n */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weight specifications.\n *\n * This corresponds to the weightsData below.\n */\n weightSpecs?: WeightsManifestEntry[];\n\n /**\n * Binary buffer for all weight values concatenated in the order specified\n * by `weightSpecs`.\n */\n weightData?: ArrayBuffer;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n}\n\n/**\n * The on-disk format of the `model.json` file.\n *\n * TF.js 1.0 always populates the optional fields when writing model.json.\n * Prior versions did not provide those fields.\n */\nexport declare interface ModelJSON {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology: {};\n\n /** Model training configuration. */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weights manifest.\n *\n * The weights manifest consists of an ordered list of weight-manifest\n * groups. Each weight-manifest group consists of a number of weight values\n * stored in a number of paths. 
See the documentation of\n * `WeightsManifestConfig` for more details.\n */\n weightsManifest: WeightsManifestConfig;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n}\n\n/**\n * Type definition for handlers of loading operations.\n */\nexport type LoadHandler = () => Promise;\n\n/**\n * Type definition for handlers of saving operations.\n */\nexport type SaveHandler = (modelArtifact: ModelArtifacts) =>\n Promise;\n\n/**\n * Interface for a model import/export handler.\n *\n * The `save` and `load` handlers are both optional, in order to allow handlers\n * that support only saving or loading.\n */\n// tslint:disable-next-line:interface-name\nexport interface IOHandler {\n save?: SaveHandler;\n load?: LoadHandler;\n}\n\n/**\n * An interface for the manager of a model store.\n *\n * A model store is defined as a storage medium on which multiple models can\n * be stored. Each stored model has a unique `path` as its identifier.\n * A `ModelStoreManager` for the store allows actions including\n *\n * - Listing the models stored in the store.\n * - Deleting a model from the store.\n */\nexport interface ModelStoreManager {\n /**\n * List all models in the model store.\n *\n * @returns A dictionary mapping paths of existing models to their\n * model artifacts info. 
Model artifacts info include type of the model's\n * topology, byte sizes of the topology, weights, etc.\n */\n listModels(): Promise<{[path: string]: ModelArtifactsInfo}>;\n\n /**\n * Remove a model specified by `path`.\n *\n * @param path\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n */\n removeModel(path: string): Promise;\n}\n\n/**\n * Callback for the progress of a long-running action such as an HTTP\n * request for a large binary object.\n *\n * `fraction` should be a number in the [0, 1] interval, indicating how\n * much of the action has completed.\n */\nexport type OnProgressCallback = (fraction: number) => void;\n\n/** @innamespace io */\nexport interface LoadOptions {\n /**\n * RequestInit (options) for HTTP requests.\n *\n * For detailed information on the supported fields, see\n * [https://developer.mozilla.org/en-US/docs/Web/API/Request/Request](\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request)\n */\n requestInit?: RequestInit;\n\n /**\n * Progress callback.\n */\n onProgress?: OnProgressCallback;\n\n /**\n * A function used to override the `window.fetch` function.\n */\n fetchFunc?: Function;\n\n /**\n * Strict loading model: whether extraneous weights or missing\n * weights should trigger an `Error`.\n *\n * If `true`, require that the provided weights exactly match those\n * required by the layers. `false` means that both extra weights\n * and missing weights will be silently ignored.\n *\n * Default: `true`.\n */\n strict?: boolean;\n\n /**\n * Path prefix for weight files, by default this is calculated from the\n * path of the model JSON file.\n *\n * For instance, if the path to the model JSON file is\n * `http://localhost/foo/model.json`, then the default path prefix will be\n * `http://localhost/foo/`. If a weight file has the path value\n * `group1-shard1of2` in the weight manifest, then the weight file will be\n * loaded from `http://localhost/foo/group1-shard1of2` by default. However,\n * if you provide a `weightPathPrefix` value of\n * `http://localhost/foo/alt-weights`, then the weight file will be loaded\n * from the path `http://localhost/foo/alt-weights/group1-shard1of2` instead.\n */\n weightPathPrefix?: string;\n\n /**\n * Whether the module or model is to be loaded from TF Hub.\n *\n * Setting this to `true` allows passing a TF-Hub module URL, omitting the\n * standard model file name and the query parameters.\n *\n * Default: `false`.\n */\n fromTFHub?: boolean;\n\n /**\n * An async function to convert weight file name to URL. The weight file\n * names are stored in model.json's weightsManifest.paths field. By default we\n * consider weight files are colocated with the model.json file. For example:\n * model.json URL: https://www.google.com/models/1/model.json\n * group1-shard1of1.bin url:\n * https://www.google.com/models/1/group1-shard1of1.bin\n *\n * With this func you can convert the weight file name to any URL.\n */\n weightUrlConverter?: (weightFileName: string) => Promise;\n}\n\n/**\n * Additional options for Platform.fetch\n */\nexport interface RequestDetails {\n /**\n * Is this request for a binary file (as opposed to a json file)\n */\n isBinary?: boolean;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
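A handler only needs to implement the parts of `IOHandler` it supports. Below is a minimal in-memory save handler sketched against the interfaces above; it is not a built-in handler (`tf.io.withSaveHandler` and `tf.io.fromMemory` are the library-provided equivalents), and it assumes `tf.io.getModelArtifactsInfoForJSON` is exported, as in recent tfjs-core versions:

```ts
import * as tf from '@tensorflow/tfjs-core';

let lastSaved: tf.io.ModelArtifacts|null = null;

// Implements only `save`; `load` is optional per the IOHandler interface.
const memoryHandler: tf.io.IOHandler = {
  save: async (artifacts: tf.io.ModelArtifacts) => {
    lastSaved = artifacts;
    return {
      modelArtifactsInfo: tf.io.getModelArtifactsInfoForJSON(artifacts),
    };
  },
};

// A tfjs-layers or tfjs-converter model could then be saved with
// `model.save(memoryHandler)` (the `model` here is hypothetical).
```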
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {complex} from '../ops/complex';\nimport {tensor} from '../ops/tensor';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\nimport {TypedArray} from '../types';\nimport {sizeFromShape} from '../util';\n\nimport {DTYPE_VALUE_SIZE_MAP, ModelArtifacts, ModelArtifactsInfo, ModelJSON, WeightGroup, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(\n tensors: NamedTensorMap|NamedTensor[], group?: WeightGroup):\n Promise<{data: ArrayBuffer, specs: WeightsManifestEntry[]}> {\n // TODO(adarob, cais): Support quantization.\n const specs: WeightsManifestEntry[] = [];\n const dataPromises: Array> = [];\n\n const names: string[] = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec: WeightsManifestEntry = {name, shape: t.shape, dtype: t.dtype};\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async resolve => {\n const vals = await t.bytes() as Uint8Array[];\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength =\n new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n } else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n\n const tensorValues = await Promise.all(dataPromises);\n return {data: concatenateTypedArrays(tensorValues), specs};\n}\n\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(\n buffer: ArrayBuffer, specs: WeightsManifestEntry[]): NamedTensorMap {\n // TODO(adarob, cais): Support quantization.\n const out: NamedTensorMap = {};\n let float16Decode: (buffer: Uint16Array) => Float32Array | undefined;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values: TypedArray|string[]|Uint8Array[];\n\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(\n `Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n } else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(\n `Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n } else {\n throw new Error(\n `Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer =\n buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n } else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray as Uint16Array);\n } else {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n } else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n } else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(\n buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n (values as Uint8Array[]).push(bytes);\n offset += byteLength;\n }\n } else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n } else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n } else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n } else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n realTensor.dispose();\n imageTensor.dispose();\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs: TypedArray[]): ArrayBuffer {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n\n let totalByteLength = 0;\n\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. 
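`encodeWeights` and `decodeWeights` above are exported on `tf.io` and are exact inverses of one another (sharding and quantization aside). A small round-trip sketch with illustrative names and shapes:

```ts
import * as tf from '@tensorflow/tfjs-core';

async function roundTrip() {
  const tensors = {
    'dense/kernel': tf.randomNormal([3, 2]),
    'dense/bias': tf.zeros([2]),
  };
  // encodeWeights concatenates the binary values into one ArrayBuffer and
  // emits one WeightsManifestEntry per tensor.
  const {data, specs} = await tf.io.encodeWeights(tensors);
  console.log(specs.map(s => `${s.name}: ${s.dtype} [${s.shape}]`));

  // decodeWeights slices `data` back into tensors, following the order and
  // byte sizes implied by `specs`.
  const restored = tf.io.decodeWeights(data, specs);
  restored['dense/kernel'].print();
}
roundTrip();
```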
`normliazedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfy this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs: TypedArray[] = [];\n xs.forEach((x: TypedArray) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(\n x.byteLength === x.buffer.byteLength ? x :\n new (x.constructor as any)(x));\n if (!(x as any instanceof Float32Array || x as any instanceof Int32Array ||\n x as any instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x: TypedArray) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n\n return y.buffer;\n}\n\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str: string): number {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer: ArrayBuffer): string {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str: string): ArrayBuffer {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers: ArrayBuffer[]): ArrayBuffer {\n if (buffers.length === 1) {\n return buffers[0];\n }\n\n let totalByteLength = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n totalByteLength += buffer.byteLength;\n });\n\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path: string): string {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = 
path.split(SEPARATOR);\n return items[items.length - 1];\n}\n\n/**\n * Create `ModelJSON` from `ModelArtifacts`.\n *\n * @param artifacts Model artifacts, describing the model and its weights.\n * @param manifest Weight manifest, describing where the weights of the\n * `ModelArtifacts` are stored, and some metadata about them.\n * @returns Object representing the `model.json` file describing the model\n * artifacts and weights\n */\nexport function getModelJSONForModelArtifacts(\n artifacts: ModelArtifacts, manifest: WeightsManifestConfig): ModelJSON {\n const result: ModelJSON = {\n modelTopology: artifacts.modelTopology,\n format: artifacts.format,\n generatedBy: artifacts.generatedBy,\n convertedBy: artifacts.convertedBy,\n weightsManifest: manifest\n };\n if (artifacts.signature != null) {\n result.signature = artifacts.signature;\n }\n if (artifacts.userDefinedMetadata != null) {\n result.userDefinedMetadata = artifacts.userDefinedMetadata;\n }\n if (artifacts.modelInitializer != null) {\n result.modelInitializer = artifacts.modelInitializer;\n }\n if (artifacts.trainingConfig != null) {\n result.trainingConfig = artifacts.trainingConfig;\n }\n return result;\n}\n\n/**\n * Create `ModelArtifacts` from a JSON file.\n *\n * @param modelJSON Object containing the parsed JSON of `model.json`\n * @param loadWeights Function that takes the JSON file's weights manifest,\n * reads weights from the listed path(s), and returns a Promise of the\n * weight manifest entries along with the weights data.\n * @returns A Promise of the `ModelArtifacts`, as described by the JSON file.\n */\nexport async function getModelArtifactsForJSON(\n modelJSON: ModelJSON,\n loadWeights: (weightsManifest: WeightsManifestConfig) => Promise<[\n /* weightSpecs */ WeightsManifestEntry[], /* weightData */ ArrayBuffer\n ]>): Promise {\n const modelArtifacts: ModelArtifacts = {\n modelTopology: modelJSON.modelTopology,\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy\n };\n\n if (modelJSON.trainingConfig != null) {\n modelArtifacts.trainingConfig = modelJSON.trainingConfig;\n }\n if (modelJSON.weightsManifest != null) {\n const [weightSpecs, weightData] =\n await loadWeights(modelJSON.weightsManifest);\n modelArtifacts.weightSpecs = weightSpecs;\n modelArtifacts.weightData = weightData;\n }\n if (modelJSON.signature != null) {\n modelArtifacts.signature = modelJSON.signature;\n }\n if (modelJSON.userDefinedMetadata != null) {\n modelArtifacts.userDefinedMetadata = modelJSON.userDefinedMetadata;\n }\n if (modelJSON.modelInitializer != null) {\n modelArtifacts.modelInitializer = modelJSON.modelInitializer;\n }\n\n return modelArtifacts;\n}\n\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function getModelArtifactsInfoForJSON(modelArtifacts: ModelArtifacts):\n ModelArtifactsInfo {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n\n/**\n * 
Computes mantisa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable(): Uint32Array {\n const convertMantissa = (i: number): number => {\n let m = i << 13;\n let e = 0;\n\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n\n return m | e;\n };\n\n const mantisaTable = new Uint32Array(2048);\n\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n\n return mantisaTable;\n}\n\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable(): Uint32Array {\n const exponentTable = new Uint32Array(64);\n\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n\n return exponentTable;\n}\n\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 6d offset values.\n */\nfunction computeFloat16OffsetTable(): Uint32Array {\n const offsetTable = new Uint32Array(64);\n\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n\n return offsetTable;\n}\n\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder(): (buffer: Uint16Array) => Float32Array {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n\n return (quantizedArray: Uint16Array) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits =\n mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IOHandler, LoadOptions} from './types';\n\nexport type IORouter = (url: string|string[], loadOptions?: LoadOptions) =>\n IOHandler;\n\nexport class IORouterRegistry {\n // Singleton instance.\n private static instance: IORouterRegistry;\n\n private saveRouters: IORouter[];\n private loadRouters: IORouter[];\n\n private constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n\n private static getInstance(): IORouterRegistry {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter: IORouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter: IORouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. 
If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url: string|string[]): IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url: string|string[], loadOptions?: LoadOptions):\n IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n\n private static getHandlers(\n url: string|string[], handlerType: 'save'|'load',\n loadOptions?: LoadOptions): IOHandler[] {\n const validHandlers: IOHandler[] = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\n\nexport const registerSaveRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url: string|string[]) =>\n IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers =\n (url: string|string[], loadOptions?: LoadOptions) =>\n IORouterRegistry.getLoadHandlers(url, loadOptions);\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\n\nimport {getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelStoreManager, SaveResult} from './types';\n\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. 
The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase(): Promise {\n const idbFactory = getIndexedDBFactory();\n\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\n\nfunction getIndexedDBFactory(): IDBFactory {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'Failed to obtain IndexedDB factory because the current environment' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow: any = typeof window === 'undefined' ? self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error(\n 'The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\n\nfunction setUpDatabase(openRequest: IDBRequest) {\n const db = openRequest.result as IDBDatabase;\n db.createObjectStore(MODEL_STORE_NAME, {keyPath: 'modelPath'});\n db.createObjectStore(INFO_STORE_NAME, {keyPath: 'modelPath'});\n}\n\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB implements IOHandler {\n protected readonly indexedDB: IDBFactory;\n protected readonly modelPath: string;\n\n static readonly URL_SCHEME = 'indexeddb://';\n\n constructor(modelPath: string) {\n this.indexedDB = getIndexedDBFactory();\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n return this.databaseAction(this.modelPath, modelArtifacts) as\n Promise;\n }\n\n async load(): Promise {\n return this.databaseAction(this.modelPath) as Promise;\n }\n\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. 
If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n private databaseAction(modelPath: string, modelArtifacts?: ModelArtifacts):\n Promise {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n } else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n } else {\n // Put model into object store.\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest =\n infoStore.put({modelPath: this.modelPath, modelArtifactsInfo});\n let modelTx: IDBTransaction;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => resolve({modelArtifactsInfo});\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n\nexport const indexedDBRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 
'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel'));\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath: string): IOHandler {\n return new BrowserIndexedDB(modelPath);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\n\nexport class BrowserIndexedDBManager implements ModelStoreManager {\n private indexedDB: IDBFactory;\n\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n return new Promise<{[path: string]: ModelArtifactsInfo}>(\n (resolve, reject) => {\n const openRequest =\n this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = (store as any).getAll() as IDBRequest;\n getAllInfoRequest.onsuccess = () => {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n\n async removeModel(path: string): Promise {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n\n const getInfoRequest = infoStore.get(path);\n let modelTx: IDBTransaction;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n } else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () =>\n resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error =>\n reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = 
deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelJSON, ModelStoreManager, SaveResult} from './types';\n\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts(): string[] {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error(\n 'purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths: string[] = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if (key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\n\ntype LocalStorageKeys = {\n /** Key of the localStorage entry storing `ModelArtifactsInfo`. 
*/\n info: string,\n /**\n * Key of the localStorage entry storing the 'modelTopology' key of\n * `model.json`\n */\n topology: string,\n /**\n * Key of the localStorage entry storing the `weightsManifest.weights` entries\n * of `model.json`\n */\n weightSpecs: string,\n /** Key of the localStorage entry storing the weight data in Base64 */\n weightData: string,\n /**\n * Key of the localStorage entry storing the remaining fields of `model.json`\n * @see {@link ModelMetadata}\n */\n modelMetadata: string,\n};\n\ntype ModelMetadata = Omit;\n\nfunction getModelKeys(path: string): LocalStorageKeys {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata:\n [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n\nfunction removeItems(keys: LocalStorageKeys): void {\n for (const key of Object.values(keys)) {\n window.localStorage.removeItem(key);\n }\n}\n\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key: string) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage implements IOHandler {\n protected readonly LS: Storage;\n protected readonly modelPath: string;\n protected readonly keys: LocalStorageKeys;\n\n static readonly URL_SCHEME = 'localstorage://';\n\n constructor(modelPath: string) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n\n try {\n this.LS.setItem(this.keys.info, 
JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(\n this.keys.weightData,\n arrayBufferToBase64String(modelArtifacts.weightData));\n\n // Note that JSON.stringify doesn't write out keys that have undefined\n // values, so for some keys, we set undefined instead of a null-ish\n // value.\n const metadata: Required = {\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n signature: modelArtifacts.signature != null ?\n modelArtifacts.signature :\n undefined,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata != null ?\n modelArtifacts.userDefinedMetadata :\n undefined,\n modelInitializer: modelArtifacts.modelInitializer != null ?\n modelArtifacts.modelInitializer :\n undefined,\n trainingConfig: modelArtifacts.trainingConfig != null ?\n modelArtifacts.trainingConfig :\n undefined\n };\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify(metadata));\n\n return {modelArtifactsInfo};\n } catch (err) {\n // If saving failed, clean up all items saved so far.\n removeItems(this.keys);\n\n throw new Error(\n `Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load(): Promise {\n const info =\n JSON.parse(this.LS.getItem(this.keys.info)) as ModelArtifactsInfo;\n if (info == null) {\n throw new Error(\n `In local storage, there is no model with name '${this.modelPath}'`);\n }\n\n if (info.modelTopologyType !== 'JSON') {\n throw new Error(\n 'BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n\n const out: ModelArtifacts = {};\n\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(\n `In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(\n `In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString) as ModelMetadata;\n out.format = metadata.format;\n out.generatedBy = metadata.generatedBy;\n out.convertedBy = metadata.convertedBy;\n if (metadata.signature != null) {\n out.signature = metadata.signature;\n }\n if (metadata.userDefinedMetadata != null) {\n out.userDefinedMetadata = metadata.userDefinedMetadata;\n }\n if (metadata.modelInitializer != null) {\n out.modelInitializer = metadata.modelInitializer;\n }\n if (metadata.trainingConfig != null) {\n out.trainingConfig = metadata.trainingConfig;\n }\n }\n\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(\n `In local storage, the 
binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n\n return out;\n }\n}\n\nexport const localStorageRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(\n url.slice(BrowserLocalStorage.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath: string): IOHandler {\n return new BrowserLocalStorage(modelPath);\n}\n\nexport class BrowserLocalStorageManager implements ModelStoreManager {\n private readonly LS: Storage;\n\n constructor() {\n assert(\n env().getBool('IS_BROWSER'),\n () => 'Current environment is not a web browser');\n assert(\n typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined',\n () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key)) as ModelArtifactsInfo;\n }\n }\n return out;\n }\n\n async removeModel(path: string): Promise {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info)) as ModelArtifactsInfo;\n removeItems(keys);\n return info;\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\n\nimport {assert} from '../util';\n\nimport {IORouterRegistry} from './router_registry';\nimport {ModelArtifactsInfo, ModelStoreManager} from './types';\n\nconst URL_SCHEME_SUFFIX = '://';\n\nexport class ModelStoreManagerRegistry {\n // Singleton instance.\n private static instance: ModelStoreManagerRegistry;\n\n private managers: {[scheme: string]: ModelStoreManager};\n\n private constructor() {\n this.managers = {};\n }\n\n private static getInstance(): ModelStoreManagerRegistry {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme: string, manager: ModelStoreManager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(\n registry.managers[scheme] == null,\n () => `A model store manager is already registered for scheme '${\n scheme}'.`);\n registry.managers[scheme] = manager;\n }\n\n static getManager(scheme: string): ModelStoreManager {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n\n static getSchemes(): string[] {\n return Object.keys(this.getInstance().managers);\n }\n}\n\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url: string): {scheme: string, path: string} {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(\n `The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\n\nasync function cloneModelInternal(\n sourceURL: string, destURL: string,\n deleteSource = false): Promise {\n assert(\n sourceURL !== destURL,\n () => `Old path and new path are the same: '${sourceURL}'`);\n\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(\n loadHandlers.length > 0,\n () => `Copying failed because no load handler is found for source URL ${\n sourceURL}.`);\n assert(\n loadHandlers.length < 2,\n () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(\n saveHandlers.length > 0,\n () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(\n saveHandlers.length < 2,\n () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(sourceURL).scheme;\n\n const modelArtifacts = await loadHandler.load();\n\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n const saveResult = await saveHandler.save(modelArtifacts);\n\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n return saveResult.modelArtifactsInfo;\n}\n\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels(): Promise<{[url: string]: ModelArtifactsInfo}> {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out: {[url: string]: ModelArtifactsInfo} = {};\n for (const scheme of schemes) {\n const schemeOut =\n await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n\n/**\n * Remove a model specified by URL from a reigstered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url: string): Promise {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\nexport {moveModel, copyModel, removeModel, listModels};\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\nimport {BrowserIndexedDB, BrowserIndexedDBManager} from '../io/indexed_db';\nimport {BrowserLocalStorage, BrowserLocalStorageManager} from '../io/local_storage';\nimport {ModelStoreManagerRegistry} from '../io/model_management';\n\nimport {Platform} from './platform';\n\nexport class PlatformBrowser implements Platform {\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n private textEncoder: TextEncoder;\n\n fetch(path: string, init?: RequestInit): Promise {\n return fetch(path, init);\n }\n\n now(): number {\n return performance.now();\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\n\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n } catch (err) {\n }\n\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n } catch (err) {\n }\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from '../environment';\n\nimport {Platform} from './platform';\n\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\n\ntype FetchFn = (url: string, init?: RequestInit) => Promise;\nlet systemFetch: FetchFn;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn: FetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch(): FetchFn {\n return systemFetch;\n}\n\nexport class PlatformNode implements Platform {\n private textEncoder: TextEncoder;\n // tslint:disable-next-line:no-any\n util: any;\n\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n\n fetch(path: string, requestInits?: RequestInit): Promise {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n\n now(): number {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\n\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorBuffer} from '../tensor';\nimport {DataType, DataTypeMap, Rank, ShapeMap} from '../types';\nimport * as util from '../util';\n\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(\n shape: ShapeMap[R], dtype: D = 'float32' as D,\n values?: DataTypeMap[D]): TensorBuffer {\n dtype = dtype || 'float32' as D;\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Cast, CastAttrs, CastInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {DataType, TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x: T|TensorLike, dtype: DataType): T {\n const $x = convertToTensor(x, 'x', 'cast');\n\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n\n const inputs: CastInputs = {x: $x};\n const attrs: CastAttrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const cast = op({cast_});\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Identity, IdentityInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'clone', 'string_or_numeric');\n const inputs: IdentityInputs = {x: $x};\n\n // Note this op is called tf.identity in python. 
Hence the kernel name used\n // here.\n return ENGINE.runKernel(Identity, inputs as {} as NamedTensorMap);\n}\n\nexport const clone = op({clone_});\n", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\n\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x: T, verbose = false): void {\n console.log(x.toString(verbose));\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Required side effectful code for tfjs-core\n\n// Set up Engine and ENV\nimport {getOrMakeEngine} from './engine';\ngetOrMakeEngine();\n\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n\n// Set up OpHandler\nimport {buffer} from './ops/buffer';\nimport {cast} from './ops/cast';\nimport {clone} from './ops/clone';\nimport {print} from './ops/print';\nimport {OpHandler, setOpHandler} from './tensor';\nconst opHandler: OpHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Importing local_storage and indexed_db is necessary for the routers to be\n// registered.\nimport './indexed_db';\nimport './local_storage';\n\nimport {browserFiles} from './browser_files';\nimport {browserHTTPRequest, http, isHTTPScheme} from './http';\nimport {concatenateArrayBuffers, decodeWeights, encodeWeights, getModelArtifactsForJSON, getModelArtifactsInfoForJSON} from './io_utils';\nimport {fromMemory, withSaveHandler} from './passthrough';\nimport {getLoadHandlers, getSaveHandlers, registerLoadRouter, registerSaveRouter} from './router_registry';\nimport {IOHandler, LoadHandler, LoadOptions, ModelArtifacts, ModelArtifactsInfo, ModelJSON, ModelStoreManager, OnProgressCallback, RequestDetails, SaveConfig, SaveHandler, SaveResult, TrainingConfig, WeightGroup, WeightsManifestConfig, WeightsManifestEntry} from './types';\nimport {loadWeights, weightsLoaderFactory} from './weights_loader';\n\nexport {copyModel, listModels, moveModel, removeModel} from './model_management';\nexport {\n browserFiles,\n browserHTTPRequest,\n concatenateArrayBuffers,\n decodeWeights,\n encodeWeights,\n fromMemory,\n getLoadHandlers,\n getModelArtifactsForJSON,\n getModelArtifactsInfoForJSON,\n getSaveHandlers,\n http,\n IOHandler,\n isHTTPScheme,\n LoadHandler,\n LoadOptions,\n loadWeights,\n ModelArtifacts,\n ModelArtifactsInfo,\n ModelJSON,\n ModelStoreManager,\n OnProgressCallback,\n registerLoadRouter,\n registerSaveRouter,\n RequestDetails,\n SaveConfig,\n SaveHandler,\n SaveResult,\n TrainingConfig,\n WeightGroup,\n weightsLoaderFactory,\n WeightsManifestConfig,\n WeightsManifestEntry,\n withSaveHandler\n};\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {basename, concatenateArrayBuffers, getModelArtifactsForJSON, getModelArtifactsInfoForJSON, getModelJSONForModelArtifacts} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelJSON, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\n\nfunction defer(f: () => T): Promise {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\n\nexport class BrowserDownloads implements IOHandler {\n private readonly modelJsonFileName: string;\n private readonly weightDataFileName: string;\n private readonly modelJsonAnchor: HTMLAnchorElement;\n private readonly weightDataAnchor: HTMLAnchorElement;\n\n static readonly URL_SCHEME = 'downloads://';\n\n constructor(fileNamePrefix?: string) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error(\n 'browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n\n this.modelJsonFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (typeof (document) === 'undefined') {\n throw new Error(\n 'Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const weightsURL = window.URL.createObjectURL(new Blob(\n [modelArtifacts.weightData], {type: 'application/octet-stream'}));\n\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelJSON: ModelJSON =\n getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);\n\n const modelJsonURL = window.URL.createObjectURL(\n new Blob([JSON.stringify(modelJSON)], {type: 'application/json'}));\n\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = 
this.modelJsonAnchor == null ?\n document.createElement('a') :\n this.modelJsonAnchor;\n jsonAnchor.download = this.modelJsonFileName;\n jsonAnchor.href = modelJsonURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(\n () => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n\n return {modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts)};\n }\n }\n}\n\nclass BrowserFiles implements IOHandler {\n private readonly jsonFile: File;\n private readonly weightsFiles: File[];\n\n constructor(files: File[]) {\n if (files == null || files.length < 1) {\n throw new Error(\n `When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.jsonFile = files[0];\n this.weightsFiles = files.slice(1);\n }\n\n async load(): Promise {\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse((event.target as any).result) as ModelJSON;\n\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${\n this.jsonFile.name}`));\n return;\n }\n\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${\n this.jsonFile.name}`));\n return;\n }\n\n if (this.weightsFiles.length === 0) {\n resolve({modelTopology});\n return;\n }\n\n const modelArtifactsPromise = getModelArtifactsForJSON(\n modelJSON, (weightsManifest) => this.loadWeights(weightsManifest));\n resolve(modelArtifactsPromise);\n };\n\n jsonReader.onerror = error => reject(\n `Failed to read model topology and weights manifest JSON ` +\n `from file '${this.jsonFile.name}'. 
BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(this.jsonFile);\n });\n }\n\n private loadWeights(weightsManifest: WeightsManifestConfig): Promise<[\n /* weightSpecs */ WeightsManifestEntry[], /* weightData */ ArrayBuffer\n ]> {\n const weightSpecs: WeightsManifestEntry[] = [];\n const paths: string[] = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n paths.push(...entry.paths);\n }\n\n const pathToFile: {[path: string]: File} =\n this.checkManifestAndWeightFiles(weightsManifest);\n\n const promises: Array> =\n paths.map(path => this.loadWeightsFile(path, pathToFile[path]));\n\n return Promise.all(promises).then(\n buffers => [weightSpecs, concatenateArrayBuffers(buffers)]);\n }\n\n private loadWeightsFile(path: string, file: File): Promise {\n return new Promise((resolve, reject) => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const weightData = (event.target as any).result as ArrayBuffer;\n resolve(weightData);\n };\n weightFileReader.onerror = error =>\n reject(`Failed to weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(file);\n });\n }\n\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n private checkManifestAndWeightFiles(manifest: WeightsManifestConfig):\n {[path: string]: File} {\n const basenames: string[] = [];\n const fileNames = this.weightsFiles.map(file => basename(file.name));\n const pathToFile: {[path: string]: File} = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(\n `Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(\n `Weight file with basename '${pathBasename}' is not provided.`);\n } else {\n pathToFile[path] = this.weightsFiles[fileNames.indexOf(pathBasename)];\n }\n });\n }\n\n if (basenames.length !== this.weightsFiles.length) {\n throw new Error(\n `Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${this.weightsFiles.length}).`);\n }\n return pathToFile;\n }\n}\n\nexport const browserDownloadsRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used as model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. 
`null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model'): IOHandler {\n return new BrowserDownloads(fileNamePrefix);\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (``)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, One or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files: File[]): IOHandler {\n return new BrowserFiles(files);\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {assert} from '../util';\n\nimport {OnProgressCallback} from './types';\n\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolved.\n * @param startFraction Optional fraction start. Default to 0.\n * @param endFraction Optional fraction end. Default to 1.\n */\nexport function monitorPromisesProgress(\n promises: Array>, onProgress: OnProgressCallback,\n startFraction?: number, endFraction?: number) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n\n const registerMonitor = (promise: Promise<{}>) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n\n function checkPromises(promises: Array>): void {\n assert(\n promises != null && Array.isArray(promises) && promises.length > 0,\n () => 'promises must be a none empty array');\n }\n\n function checkFraction(startFraction: number, endFraction: number): void {\n assert(\n startFraction >= 0 && startFraction <= 1,\n () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(\n endFraction >= 0 && endFraction <= 1,\n () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(\n endFraction >= startFraction,\n () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n\n return Promise.all(promises.map(registerMonitor));\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '../environment';\n\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\nimport {decodeWeights} from './io_utils';\nimport {monitorPromisesProgress} from './progress';\nimport {DTYPE_VALUE_SIZE_MAP, LoadOptions, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(\n fetchURLs: string[], loadOptions?: LoadOptions): Promise {\n if (loadOptions == null) {\n loadOptions = {};\n }\n\n const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch :\n loadOptions.fetchFunc;\n\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(\n fetchURL =>\n fetchFunc(fetchURL, loadOptions.requestInit, {isBinary: true}));\n\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(\n requests, loadOptions.onProgress, fetchStartFraction,\n fetchEndFraction);\n\n const bufferPromises = responses.map(response => response.arrayBuffer());\n\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(\n bufferPromises, loadOptions.onProgress, bufferStartFraction,\n bufferEndFraction);\n return buffers;\n}\n\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[],\n requestInit?: RequestInit): Promise {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. 
At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n\n const fetchWeights = (fetchUrls: string[]) =>\n loadWeightsAsArrayBuffer(fetchUrls, {requestInit});\n const loadWeights = weightsLoaderFactory(fetchWeights);\n\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(\n fetchWeightsFunction: (fetchUrls: string[]) => Promise):\n (manifest: WeightsManifestConfig, filePathPrefix?: string,\n weightNames?: string[]) => Promise {\n return async(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[]): Promise => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch: {\n [group: number]: Array<{\n manifestEntry: WeightsManifestEntry; groupOffset: number;\n sizeBytes: number;\n }>\n } = {};\n const weightsFound =\n weightNames != null ? weightNames.map(() => false) : [];\n const allManifestWeightNames: string[] = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n } else {\n enqueueWeightsForFetchingFn();\n }\n\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(\n `Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. 
\\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch =\n groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n\n const fetchUrls: string[] = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n\n const weightsTensorMap: NamedTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(\n weightsEntry.groupOffset,\n weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap =\n decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n\n bufferIndexOffset += numBuffers;\n });\n\n return weightsTensorMap;\n };\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\n\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {concatenateArrayBuffers, getModelArtifactsForJSON, getModelArtifactsInfoForJSON, getModelJSONForModelArtifacts} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, LoadOptions, ModelArtifacts, ModelJSON, OnProgressCallback, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\nimport {loadWeightsAsArrayBuffer} from './weights_loader';\n\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest implements IOHandler {\n protected readonly path: string;\n protected readonly requestInit: RequestInit;\n\n private readonly fetch: Function;\n private readonly weightUrlConverter: (weightName: string) => Promise;\n\n readonly DEFAULT_METHOD = 'POST';\n\n static readonly URL_SCHEME_REGEX = /^https?:\\/\\//;\n\n private readonly weightPathPrefix: string;\n private readonly onProgress: OnProgressCallback;\n\n constructor(path: string, loadOptions?: LoadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n\n if (loadOptions.fetchFunc != null) {\n assert(\n typeof loadOptions.fetchFunc === 'function',\n () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n } else {\n this.fetch = env().platform.fetch;\n }\n\n assert(\n path != null && path.length > 0,\n () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n\n if (Array.isArray(path)) {\n assert(\n path.length === 2,\n () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error(\n 'requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n const init = Object.assign({method: this.DEFAULT_METHOD}, this.requestInit);\n init.body = new FormData();\n\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest: 
ModelJSON =\n getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);\n\n init.body.append(\n 'model.json',\n new Blob(\n [JSON.stringify(modelTopologyAndWeightManifest)],\n {type: JSON_TYPE}),\n 'model.json');\n\n if (modelArtifacts.weightData != null) {\n init.body.append(\n 'model.weights.bin',\n new Blob([modelArtifacts.weightData], {type: OCTET_STREAM_MIME_TYPE}),\n 'model.weights.bin');\n }\n\n const response = await this.fetch(this.path, init);\n\n if (response.ok) {\n return {\n modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n } else {\n throw new Error(\n `BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load(): Promise {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n\n if (!modelConfigRequest.ok) {\n throw new Error(\n `Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelJSON: ModelJSON;\n try {\n modelJSON = await modelConfigRequest.json();\n } catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models have been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your.pb models with the \\'pb2json\\'' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n } else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n\n // We do not allow both modelTopology and weightsManifest to be missing.\n const modelTopology = modelJSON.modelTopology;\n const weightsManifest = modelJSON.weightsManifest;\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(\n `The JSON from HTTP path ${this.path} contains neither model ` +\n `topology or manifest for weights.`);\n }\n\n return getModelArtifactsForJSON(\n modelJSON, (weightsManifest) => this.loadWeights(weightsManifest));\n }\n\n private async loadWeights(weightsManifest: WeightsManifestConfig):\n Promise<[WeightsManifestEntry[], ArrayBuffer]> {\n const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n\n const weightSpecs = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n\n const fetchURLs: string[] = [];\n const urlPromises: Array> = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n } else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\n\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url: string): [string, string] {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix =\n lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\n\nexport function isHTTPScheme(url: string): boolean {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\n\nexport const httpRouter: IORouter =\n (url: string, loadOptions?: LoadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`, if one wants to use it in\n // an environment that is not the browser or node they have to setup a\n // global fetch polyfill.\n return null;\n } else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n } else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n };\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n\n/**\n * Creates an IOHandler subtype that sends model artifacts to HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the requst,\n * this particular server reconsistutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload)') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending\n * HTTP request to server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path: string, loadOptions?: LoadOptions): IOHandler {\n return new HTTPRequest(path, loadOptions);\n}\n\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(\n path: string, loadOptions?: LoadOptions): IOHandler {\n return http(path, loadOptions);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers that pass through the in-memory ModelArtifacts format.\n */\n\nimport {IOHandler, ModelArtifacts, SaveResult, TrainingConfig, WeightsManifestEntry} from './types';\n\nclass PassthroughLoader implements IOHandler {\n constructor(private readonly modelArtifacts?: ModelArtifacts) {}\n\n async load(): Promise {\n return this.modelArtifacts;\n }\n}\n\nclass PassthroughSaver implements IOHandler {\n constructor(\n private readonly saveHandler:\n (artifacts: ModelArtifacts) => Promise) {}\n\n async save(modelArtifacts: ModelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(\n modelArtifacts: {}|ModelArtifacts, weightSpecs?: WeightsManifestEntry[],\n weightData?: ArrayBuffer, trainingConfig?: TrainingConfig): IOHandler {\n if (arguments.length === 1) {\n const isModelArtifacts =\n (modelArtifacts as ModelArtifacts).modelTopology != null ||\n (modelArtifacts as ModelArtifacts).weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts as ModelArtifacts);\n } else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({modelTopology: modelArtifacts as {}});\n }\n } else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. 
' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts as {},\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(\n saveHandler: (artifacts: ModelArtifacts) =>\n Promise): IOHandler {\n return new PassthroughSaver(saveHandler);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Exports under the tf.math.* namespace.\n */\n\nimport {confusionMatrix} from './ops/confusion_matrix';\n\nexport {confusionMatrix};\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the dot product of two matrices, A * B. 
These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(\n a: Tensor|TensorLike, b: Tensor|TensorLike, transposeA = false,\n transposeB = false): T {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: BatchMatMulInputs = {a: $a, b: $b};\n const attrs: BatchMatMulAttrs = {transposeA, transposeB};\n\n return ENGINE.runKernel(\n BatchMatMul, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const matMul = op({matMul_});\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {OneHot, OneHotAttrs, OneHotInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(\n indices: Tensor|TensorLike, depth: number, onValue = 1,\n offValue = 0): Tensor {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n\n const inputs: OneHotInputs = {indices: $indices};\n const attrs: OneHotAttrs = {depth, onValue, offValue};\n\n return ENGINE.runKernel(\n OneHot, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const oneHot = op({oneHot_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Transpose, TransposeAttrs, TransposeInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x: T|TensorLike, perm?: number[]): T {\n const $x = convertToTensor(x, 'x', 'transpose');\n\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert(\n $x.rank === perm.length,\n () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(\n axis >= 0 && axis < $x.rank,\n () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n\n if ($x.rank <= 1) {\n return $x.clone();\n }\n\n const inputs: TransposeInputs = {x: $x};\n const attrs: TransposeAttrs = {perm};\n\n return ENGINE.runKernel(\n Transpose, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const transpose = op({transpose_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport {matMul} from './mat_mul';\nimport {oneHot} from './one_hot';\nimport {op} from './operation';\nimport {transpose} from './transpose';\n\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(\n labels: Tensor1D|TensorLike, predictions: Tensor1D|TensorLike,\n numClasses: number): Tensor2D {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'confusionMatrix');\n\n util.assert(\n numClasses == null || numClasses > 0 && Number.isInteger(numClasses),\n () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert(\n $labels.rank === 1,\n () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert(\n $predictions.rank === 1,\n () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert(\n $labels.shape[0] === $predictions.shape[0],\n () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. 
` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(\n numClasses > 0 && Number.isInteger(numClasses),\n () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses) as Tensor2D;\n const oneHotPredictions =\n oneHot(cast($predictions, 'int32'), numClasses) as Tensor2D;\n const oneHotLabelsT: Tensor2D = transpose(oneHotLabels);\n const product: Tensor2D = matMul(oneHotLabelsT, oneHotPredictions);\n return cast(product, 'int32');\n}\n\nexport const confusionMatrix = op({confusionMatrix_});\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {env} from '../environment';\nimport {FromPixels, FromPixelsAttrs, FromPixelsInputs} from '../kernel_names';\nimport {getKernel, NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor2D, Tensor3D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {PixelData, TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {op} from './operation';\nimport {tensor3d} from './tensor3d';\n\nlet fromPixels2DContext: CanvasRenderingContext2D;\n\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. 
Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @returns A Tensor3D with the shape `[height, width, numChannels]`.\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap,\n numChannels = 3): Tensor3D {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error(\n 'Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n let isImageBitmap = false;\n if ((pixels as PixelData).data instanceof Uint8Array) {\n isPixelData = true;\n } else if (\n typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n } else if (\n typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n } else if (\n typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n } else if ((pixels as any).getContext != null) {\n isCanvasLike = true;\n } else if (\n typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {\n isImageBitmap = true;\n } else {\n throw new Error(\n 'pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${(pixels as {}).constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (isVideo &&\n (pixels as HTMLVideoElement).readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error(\n 'The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the