{"version":3,"file":"tf.es2017.min.js","sources":["../node_modules/@tensorflow/tfjs-core/dist/backends/backend.js","../node_modules/@tensorflow/tfjs-core/dist/util_base.js","../node_modules/@tensorflow/tfjs-core/dist/environment.js","../node_modules/@tensorflow/tfjs-core/dist/global_util.js","../node_modules/@tensorflow/tfjs-core/dist/kernel_names.js","../node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js","../node_modules/@tensorflow/tfjs-core/dist/util.js","../node_modules/@tensorflow/tfjs-core/dist/profiler.js","../node_modules/@tensorflow/tfjs-core/dist/tensor_format.js","../node_modules/@tensorflow/tfjs-core/dist/tensor.js","../node_modules/@tensorflow/tfjs-core/dist/types.js","../node_modules/@tensorflow/tfjs-core/dist/tensor_util.js","../node_modules/@tensorflow/tfjs-core/dist/engine.js","../node_modules/@tensorflow/tfjs-core/dist/tape.js","../node_modules/@tensorflow/tfjs-core/dist/device_util.js","../node_modules/@tensorflow/tfjs-core/dist/flags.js","../node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js","../node_modules/@tensorflow/tfjs-core/dist/ops/operation.js","../node_modules/@tensorflow/tfjs-core/dist/ops/complex.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js","../node_modules/@tensorflow/tfjs-core/dist/io/types.js","../node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js","../node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js","../node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js","../node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js","../node_modules/@tensorflow/tfjs-core/dist/io/model_management.js","../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js","../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js","../node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js","../node_modules/@tensorflow/tfjs-core/dist/ops/cast.js","../node_modules/@tensorflow/tfjs-core/dist/ops/clone.js","../node_modules/@tensorflow/tfjs-core/dist/ops/print.js","../node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js","../node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js","../node_modules/@tensorflow/tfjs-core/dist/io/progress.js","../node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js","../node_modules/@tensorflow/tfjs-core/dist/io/http.js","../node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js","../node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js","../node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js","../node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js","../node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/browser.js","../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js","../node_modules/@tensorflow/tfjs-core/dist/serialization.js","../node_modules/@tensorflow/tfjs-core/dist/test_util.js","../node_modules/@tensorflow/tfjs-core/dist/globals.js","../node_modules/@tensorflow/tfjs-core/dist/ops/add.js","../node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js","../node_modules/@tensorflow/tfjs-core/dist/ops/div.js","../node_modules/@tensorflow/tfjs-core/dist/ops/mul.js","../node_modules/@tensorflow/tfjs-core/dist/ops/abs.js","../node_modules/@tensorflow/tfjs-co
re/dist/ops/acos.js","../node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js","../node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/all.js","../node_modules/@tensorflow/tfjs-core/dist/ops/any.js","../node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js","../node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js","../node_modules/@tensorflow/tfjs-core/dist/ops/asin.js","../node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/atan.js","../node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js","../node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js","../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js","../node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js","../node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js","../node_modules/@tensorflow/tfjs-core/dist/ops/cos.js","../node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js","../node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js","../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/diag.js","../node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/equal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/where.js","../node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js","../node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js","../node_modules/@tensorflow/tfjs-core/dist/ops/dot.js","../node_modules/@tensorflow/tfjs-core/dist/ops/elu.js","../node_modules/@tensorflow/tfjs-core/dist/ops/erf.js","../node_modules/@tensorflow/tfjs-core/dist/ops/exp.js","../node_modules/@tensorflow/tfj
s-core/dist/ops/expand_dims.js","../node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tile.js","../node_modules/@tensorflow/tfjs-core/dist/ops/eye.js","../node_modules/@tensorflow/tfjs-core/dist/ops/fill.js","../node_modules/@tensorflow/tfjs-core/dist/ops/floor.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/gather.js","../node_modules/@tensorflow/tfjs-core/dist/ops/greater.js","../node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/imag.js","../node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js","../node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js","../node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js","../node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js","../node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js","../node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js","../node_modules/@tensorflow/tfjs-core/dist/ops/less.js","../node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js","../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js","../node_modules/@tensorflow/tfjs-core/dist/ops/log.js","../node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js","../node_modules/@tensorflow/tfjs-core/dist/gradients.js","../node_modules/@tensorflow/tfjs-core/dist/ops/neg.js","../node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js","../node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sub.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sum.js","../node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js","../node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js","../node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js","../node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js","../node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js","../node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js","../node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js","../node_modules/@tensorflow/tfjs-core/dist/ops/ones.js","../node_modules/@tensorflow/tfjs-core/dist/ops/mean.js","../node_modules/@tensorflow/tfjs-core/dist/ops/min.js","../node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js","../node_modules/@tensorflow/tfjs-core/dist/ops/mirror_pad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/mod.js","../node_modules/@tensorflow/tfjs-core/dist/ops/square.js","../node_modules/@tensorflow/tfjs-core/dist/ops/moments.js","../node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js","../node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js","../node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/real.js","../node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js","../node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js","../no
de_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pool.js","../node_modules/@tensorflow/tfjs-core/dist/ops/pow.js","../node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js","../node_modules/@tensorflow/tfjs-core/dist/ops/prod.js","../node_modules/@tensorflow/tfjs-core/dist/ops/rand.js","../node_modules/seedrandom/lib/alea.js","../node_modules/seedrandom/lib/xor128.js","../node_modules/seedrandom/lib/xorwow.js","../node_modules/seedrandom/lib/xorshift7.js","../node_modules/seedrandom/lib/xor4096.js","../node_modules/seedrandom/lib/tychei.js","../node_modules/seedrandom/seedrandom.js","../node_modules/seedrandom/index.js","../node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js","../node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/range.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/relu.js","../node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/round.js","../node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js","../node_modules/@tensorflow/tfjs-core/dist/ops/selu.js","../node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sign.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sin.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js","../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/fft.js","../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/ifft.js","../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/irfft.js","../node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/split.js","../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/rfft.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js","../node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js","../node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js","../node_modules/@tensorflow/tfjs-core/dist/ops/stack.js","../node_modules/@tensorflow/tfjs-core/dist/ops/step.js","../node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tan.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/topk.js","../node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js","../node_modules/@tensorflow/tfjs-core/dist/ops/unique.js","../node_modules/@tensorflow/tfjs-core/di
st/ops/unsorted_segment_sum.js","../node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js","../node_modules/@tensorflow/tfjs-core/dist/ops/variable.js","../node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js","../node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js","../node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js","../node_modules/@tensorflow/tfjs-core/dist/ops/compare.js","../node_modules/@tensorflow/tfjs-core/dist/ops/binary_ops.js","../node_modules/@tensorflow/tfjs-core/dist/ops/norm.js","../node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js","../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js","../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js","../node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js","../node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js","../node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/fused/conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js","../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js","../node_modules/@tensorflow/tfjs-core/dist/ops/fused/depthwise_conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/fused/mat_mul.js","../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hamming_window.js","../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hann_window.js","../node_modules/@tensorflow/tfjs-core/dist/ops/signal/frame.js","../node_modules/@tensorflow/tfjs-core/dist/ops/signal/stft.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/crop_and_resize.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/flip_left_right.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/rotate_with_offset.js","../node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression.js","../node_modules/@tensorflow/tfjs-core/dist/backends/array_util.js","../node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_async.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score_async.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded_async.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_bilinear.js","../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_nearest_neighbor.js","../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/band_part.js","../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/gram_schmidt.js","../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/qr.js","../node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/compute_weighted_loss.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/absolute_difference.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/cosine_distance.js","../node_module
s/@tensorflow/tfjs-core/dist/ops/losses/hinge_loss.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/huber_loss.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/log_loss.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/mean_squared_error.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/sigmoid_cross_entropy.js","../node_modules/@tensorflow/tfjs-core/dist/ops/losses/softmax_cross_entropy.js","../node_modules/@tensorflow/tfjs-core/dist/ops/ops.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/adadelta_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/adagrad_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/adam_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/adamax_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/sgd_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/momentum_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/rmsprop_optimizer.js","../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer_constructors.js","../node_modules/@tensorflow/tfjs-core/dist/train.js","../node_modules/@tensorflow/tfjs-core/dist/browser_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/rotate_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/array_ops_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js","../node_modules/@tensorflow/tfjs-core/dist/log.js","../node_modules/@tensorflow/tfjs-core/dist/backends/complex_util.js","../node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js","../node_modules/@tensorflow/tfjs-core/dist/ops/erf_util.js","../node_modules/@tensorflow/tfjs-core/dist/backends/split_shared.js","../node_modules/@tensorflow/tfjs-core/dist/backends/tile_impl.js","../node_modules/@tensorflow/tfjs-core/dist/backends/topk_impl.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Abs_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Acos_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Acosh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMax_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMin_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Asin_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Asinh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Atanh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Cast_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Ceil_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ClipByValue_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js"
,"../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Cos_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Cosh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Dilation2D_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Erf_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Exp_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Expm1_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Floor_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/GatherV2_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/IsFinite_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/IsInf_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/IsNan_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Log1p_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Log_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/LogSoftmax_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/min_max_grad_util.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js","../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ZerosLike_grad.js","../node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Min_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/MirrorPad_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Negate_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/OnesLike_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js","../node_modules/@tensorflow/tfjs-core/dist/grad
ients/Prelu_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Reciprocal_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Reshape_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeBilinear_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeNearestNeighbor_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Reverse_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Round_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Rsqrt_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/SelectV2_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sigmoid_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sign_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sin_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sinh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Slice_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Softmax_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Softplus_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sqrt_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Step_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Sum_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Tan_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Tanh_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/Unpack_grad.js","../node_modules/@tensorflow/tfjs-core/dist/gradients/UnsortedSegmentSum_grad.js","../node_modules/@tensorflow/tfjs-layers/dist/backend/common.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/abs.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acos.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acosh.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/all.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/any.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_max.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_min.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_scalar.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_type.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as1d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as2d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as3d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as4d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as5d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asin.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asinh.js","../node_modules/@tensorfl
ow/tfjs-core/dist/public/chained_ops/atan.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan2.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atanh.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/avg_pool.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batch_to_space_nd.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batchnorm.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/broadcast_to.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cast.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ceil.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/clip_by_value.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/concat.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv1d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d_transpose.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cos.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cosh.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cumsum.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depth_to_space.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2D_deprecated.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dilation2d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_no_nan.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dot.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/elu.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/erf.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/exp.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expand_dims.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expm1.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/fft.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/flatten.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floor.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floorDiv.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/gather.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ifft.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/irfft.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_finite.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_inf.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_nan.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/leaky_relu.js"
,"../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/local_response_normalization.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sigmoid.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_softmax.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sum_exp.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log1p.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_and.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_not.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_or.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_xor.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mat_mul.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max_pool.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mean.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/min.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mirror_pad.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/neg.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/norm.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/one_hot.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ones_like.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pad.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pool.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prelu.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prod.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reciprocal.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu6.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape_as.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_bilinear.js","../node_modules/@tensorflow/tfjs-core/di
st/public/chained_ops/resize_nearest_neighbor.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reverse.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rfft.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/round.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rsqrt.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/selu.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/separable_conv2d.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sigmoid.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sign.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sin.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sinh.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/slice.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softmax.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softplus.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/space_to_batch_nd.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/split.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sqrt.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/square.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squeeze.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/stack.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/step.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/strided_slice.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub_strict.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sum.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tan.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tanh.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tile.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_bool.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_float.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_int.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/topk.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/transpose.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unique.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unsorted_segment_sum.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unstack.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/where.js","../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/zeros_like.js","../node_modules/@tensorflow/tfjs-layers/dist/errors.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/generic_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/constraints.js","../node_modules/@tensorflow/tfjs-layers/dist/exports_constraints.js","../node_modules/@tensorflow/tfjs-layers/dist/keras_format/common.js","../node_modules/@tensorflow/tfjs-layers/dist/common.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/math_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/backend/tfjs_backend.js","../node_modules/@tensorflow/tfjs-lay
ers/dist/keras_format/initializer_config.js","../node_modules/@tensorflow/tfjs-layers/dist/initializers.js","../node_modules/@tensorflow/tfjs-layers/dist/exports_initializers.js","../node_modules/@tensorflow/tfjs-layers/dist/backend/state.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/types_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/variable_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/variables.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/topology.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/input_layer.js","../node_modules/@tensorflow/tfjs-layers/dist/logs.js","../node_modules/@tensorflow/tfjs-layers/dist/base_callbacks.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/serialization.js","../node_modules/@tensorflow/tfjs-layers/dist/losses.js","../node_modules/@tensorflow/tfjs-layers/dist/metrics.js","../node_modules/@tensorflow/tfjs-layers/dist/user_defined_metadata.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/layer_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/serialization_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/executor.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/container.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/training_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/training_dataset.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/training_tensors.js","../node_modules/@tensorflow/tfjs-layers/dist/engine/training.js","../node_modules/@tensorflow/tfjs-layers/dist/optimizers.js","../node_modules/@tensorflow/tfjs-layers/dist/models.js","../node_modules/@tensorflow/tfjs-layers/dist/exports.js","../node_modules/@tensorflow/tfjs-layers/dist/activations.js","../node_modules/@tensorflow/tfjs-layers/dist/regularizers.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/advanced_activations.js","../node_modules/@tensorflow/tfjs-layers/dist/utils/conv_utils.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_depthwise.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/recurrent.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_recurrent.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/core.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/embeddings.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/merge.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/noise.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/normalization.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/padding.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/pooling.js","../node_modules/@tensorflow/tfjs-layers/dist/layers/wrappers.js","../node_modules/@tensorflow/tfjs-layers/dist/exports_layers.js","../node_modules/@tensorflow/tfjs-layers/dist/exports_metrics.js","../node_modules/@tensorflow/tfjs-layers/dist/exports_regularizers.js","../node_modules/@tensorflow/tfjs-layers/dist/callbacks.js","../node_modules/@tensorflow/tfjs-converter/dist/data/compiled_api.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/register.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/utils.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/arithmetic.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/basic_math.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/control.js","../node_modules/@tensorfl
ow/tfjs-converter/dist/operations/op_list/convolution.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/creation.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/dynamic.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/evaluation.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/graph.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/hash_table.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/image.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/logical.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/matrices.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/normalization.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/reduction.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/slice_join.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/spectral.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/transformation.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_mapper.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/node_value_impl.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_utils.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_array.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_list.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/control_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/convolution_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/dynamic_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/hash_table.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/arithmetic_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/basic_math_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/creation_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/evaluation_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/image_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/graph_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/logical_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/matrices_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/normalization_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/reduction_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/slice_join_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/spectral_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/transformation_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/hash_table_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/execution_context.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/model_analysis.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_executor.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/resource_ma
nager.js","../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_model.js","../node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js","../node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js","../node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js","../node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/dataset.js","../node_modules/@tensorflow/tfjs-data/dist/datasets/text_line_dataset.js","../node_modules/@tensorflow/tfjs-data/dist/datasets/csv_dataset.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/microphone_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/webcam_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/datasource.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/string_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/byte_chunk_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/file_chunk_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/iterators/url_chunk_iterator.js","../node_modules/@tensorflow/tfjs-data/dist/util/source_util.js","../node_modules/@tensorflow/tfjs-data/dist/sources/file_data_source.js","../node_modules/@tensorflow/tfjs-data/dist/sources/url_data_source.js","../node_modules/@tensorflow/tfjs-data/dist/readers.js","../node_modules/@tensorflow/tfjs-data/dist/version.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/cpu_util.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/backend_cpu.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Abs.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/binary_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Complex.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Identity.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Real.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cast.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/kernel_utils.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Add.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_utils.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Ceil.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Exp.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Expm1.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Floor.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Multiply.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NotEqual.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Rsqrt.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Slice.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SquaredDifference.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sub.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/base.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Elu.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Prelu.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Relu.js",
"../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Relu6.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fused_utils.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reshape.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchMatMul.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/_FusedMatMul.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acos.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acosh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asin.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asinh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atan.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atanh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/pool_utils.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPool.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPoolBackprop.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchNorm.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Clip.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Imag.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Concat.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2D.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2DBackpropFilter.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2DBackpropInput.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3D.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3DBackpropFilterV2.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3DBackpropInputV2.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cos.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cosh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNative.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNativeBackpropFilter.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNativeBackpropInput.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2D.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropFilter.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropInput.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Div.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Erf.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fft_utils.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FFT.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Fill.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FlipLeftRight.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FusedConv2D.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FusedDepthwiseConv2D.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IFFT.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsFinite.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsInf.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsNaN.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log1p.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/LogicalNot.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPool.js",
"../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolBackprop.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax_impl.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MirrorPad.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV4.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV5.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/PadV2.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reciprocal.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/RotateWithOffset.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Round.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Selu.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sigmoid.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sign.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sin.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sinh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Softplus.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SpaceToBatchND.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sqrt.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Square.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Step.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tan.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tanh.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/register_all_kernels.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/canvas_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/tex_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/flags_webgl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/shared.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/packing_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/glsl_version.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/avg_pool_backprop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/complex_abs_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu_depthwise.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu_depthwise.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_
packed_gpu_depthwise.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/crop_and_resize_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/cumsum_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/depth_to_space_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/diag_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/fill_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_nd_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_util.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_context.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_math.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/im2col_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_grad_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/max_pool_backprop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/mulmat_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/multinomial_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/onehot_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/pack_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/pool_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/reduce_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/reshape_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_backprop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_backprop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/scatter_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/segment_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/select_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/strided_slice_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/texture_manager.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/tile_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/unpack_gpu.js","../node_modules/@tensorflo
w/tfjs-backend-webgl/dist/backend_webgl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/base.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Identity.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Complex.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/kernel_funcs_utils.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Add.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Atan2.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPool.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPoolBackprop.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/BatchNorm.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NotEqual.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Real.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cast.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/int.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Imag.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Reshape.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reshape.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Concat.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Concat_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cos.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Div.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/fft_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FFT_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FFT.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/flip_left_right_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FlipLeftRight.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/IFFT.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/mean_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reduce.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPool.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolBackprop.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Mean.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Mean_impl.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/mirro
r_pad_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/mirror_pad_packed_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MirrorPad.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_complex_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Multiply.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV3.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV4.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV5.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/rotate_gpu.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/RotateWithOffset.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sin.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Square.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/SquaredDifference.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sub.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Tan.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Unique.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/register_all_kernels.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose.js","../src/version.ts","../src/index.ts","../node_modules/@tensorflow/tfjs-core/dist/version.js","../node_modules/@tensorflow/tfjs-backend-cpu/dist/version.js","../node_modules/@tensorflow/tfjs-backend-webgl/dist/version.js","../node_modules/@tensorflow/tfjs-layers/dist/version.js","../node_modules/@tensorflow/tfjs-converter/dist/version.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js","../node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js"],"sourcesContent":["/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n/** Convenient class for storing tensor-related data. */\nexport class DataStorage {\n constructor(backend, dataMover) {\n this.backend = backend;\n this.dataMover = dataMover;\n this.data = new WeakMap();\n this.dataIdsCount = 0;\n }\n get(dataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n set(dataId, value) {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n has(dataId) {\n return this.data.has(dataId);\n }\n delete(dataId) {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n numDataIds() {\n return this.dataIdsCount;\n }\n}\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. 
New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend {\n time(f) {\n return notYetImplemented('time');\n }\n read(dataId) {\n return notYetImplemented('read');\n }\n readSync(dataId) {\n return notYetImplemented('readSync');\n }\n numDataIds() {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId) {\n return notYetImplemented('disposeData');\n }\n write(values, shape, dtype) {\n return notYetImplemented('write');\n }\n move(dataId, values, shape, dtype) {\n return notYetImplemented('move');\n }\n memory() {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision() {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon() {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n batchMatMul(a, b, transposeA, transposeB) {\n return notYetImplemented('batchMatMul');\n }\n fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedBatchMatMul');\n }\n slice(x, begin, size) {\n return notYetImplemented('slice');\n }\n stridedSlice(x, begin, end, strides) {\n return notYetImplemented('stridedSlice');\n }\n unstack(x, axis) {\n return notYetImplemented('unstack');\n }\n reverse(a, axis) {\n return notYetImplemented('reverse');\n }\n concat(tensors, axis) {\n return notYetImplemented('concat');\n }\n neg(a) {\n return notYetImplemented('neg');\n }\n add(a, b) {\n return notYetImplemented('add');\n }\n addN(tensors) {\n return notYetImplemented('addN');\n }\n subtract(a, b) {\n return notYetImplemented('subtract');\n }\n multiply(a, b) {\n return notYetImplemented('multiply');\n }\n realDivide(a, b) {\n return notYetImplemented('realDivide');\n }\n floorDiv(a, b) {\n return notYetImplemented('floorDiv');\n }\n sum(x, axes) {\n return notYetImplemented('sum');\n }\n prod(x, axes) {\n return notYetImplemented('prod');\n }\n unsortedSegmentSum(x, segmentIds, numSegments) {\n return notYetImplemented('unsortedSegmentSum');\n }\n argMin(x, axis) {\n return notYetImplemented('argMin');\n }\n argMax(x, axis) {\n return notYetImplemented('argMax');\n }\n equal(a, b) {\n return notYetImplemented('equal');\n }\n notEqual(a, b) {\n return notYetImplemented('notEqual');\n }\n less(a, b) {\n return notYetImplemented('less');\n }\n lessEqual(a, b) {\n return notYetImplemented('lessEqual');\n }\n greater(a, b) {\n return notYetImplemented('greater');\n }\n greaterEqual(a, b) {\n return notYetImplemented('greaterEqual');\n }\n logicalNot(a) {\n return notYetImplemented('logicalNot');\n }\n logicalAnd(a, b) {\n return notYetImplemented('logicalAnd');\n }\n logicalOr(a, b) {\n return notYetImplemented('logicalOr');\n }\n where(condition) {\n return notYetImplemented('where');\n }\n select(condition, a, b) {\n return notYetImplemented('select');\n }\n topk(x, k, sorted) {\n return notYetImplemented('topk');\n }\n min(x, axes) {\n return notYetImplemented('min');\n }\n minimum(a, b) {\n return notYetImplemented('minimum');\n }\n mod(a, b) {\n return notYetImplemented('mod');\n }\n max(x, axes) {\n return notYetImplemented('max');\n }\n maximum(a, b) {\n return notYetImplemented('maximum');\n }\n all(x, axes) {\n return notYetImplemented('all');\n }\n any(x, axes) {\n return notYetImplemented('any');\n }\n squaredDifference(a, b) {\n return 
notYetImplemented('squaredDifference');\n }\n ceil(x) {\n return notYetImplemented('ceil');\n }\n floor(x) {\n return notYetImplemented('floor');\n }\n round(x) {\n return notYetImplemented('round');\n }\n sign(x) {\n return notYetImplemented('sign');\n }\n isNaN(x) {\n return notYetImplemented('isNaN');\n }\n isInf(x) {\n return notYetImplemented('isInf');\n }\n isFinite(x) {\n return notYetImplemented('isFinite');\n }\n pow(a, b) {\n return notYetImplemented('pow');\n }\n exp(x) {\n return notYetImplemented('exp');\n }\n expm1(x) {\n return notYetImplemented('expm1');\n }\n softmax(x, dim) {\n return notYetImplemented('softmax');\n }\n log(x) {\n return notYetImplemented('log');\n }\n log1p(x) {\n return notYetImplemented('log1p');\n }\n sqrt(x) {\n return notYetImplemented('sqrt');\n }\n rsqrt(x) {\n return notYetImplemented('rsqrt');\n }\n square(x) {\n return notYetImplemented('square');\n }\n reciprocal(x) {\n return notYetImplemented('reciprocal');\n }\n relu(x) {\n return notYetImplemented('relu');\n }\n relu6(x) {\n return notYetImplemented('relu6');\n }\n prelu(x, a) {\n return notYetImplemented('prelu');\n }\n elu(x) {\n return notYetImplemented('elu');\n }\n eluDer(dy, y) {\n return notYetImplemented('eluDer');\n }\n selu(x) {\n return notYetImplemented('selu');\n }\n int(x) {\n return notYetImplemented('int');\n }\n clip(x, min, max) {\n return notYetImplemented('clip');\n }\n abs(x) {\n return notYetImplemented('abs');\n }\n complexAbs(x) {\n return notYetImplemented('complexAbs');\n }\n sigmoid(x) {\n return notYetImplemented('sigmoid');\n }\n softplus(x) {\n return notYetImplemented('softplus');\n }\n sin(x) {\n return notYetImplemented('sin');\n }\n cos(x) {\n return notYetImplemented('cos');\n }\n tan(x) {\n return notYetImplemented('tan');\n }\n asin(x) {\n return notYetImplemented('asin');\n }\n acos(x) {\n return notYetImplemented('acos');\n }\n atan(x) {\n return notYetImplemented('atan');\n }\n atan2(a, b) {\n return notYetImplemented('atan2');\n }\n sinh(x) {\n return notYetImplemented('sinh');\n }\n cosh(x) {\n return notYetImplemented('cosh');\n }\n tanh(x) {\n return notYetImplemented('tanh');\n }\n asinh(x) {\n return notYetImplemented('asinh');\n }\n acosh(x) {\n return notYetImplemented('acosh');\n }\n atanh(x) {\n return notYetImplemented('atanh');\n }\n erf(x) {\n return notYetImplemented('erf');\n }\n step(x, alpha) {\n return notYetImplemented('step');\n }\n fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedConv2d');\n }\n conv2d(x, filter, convInfo) {\n return notYetImplemented('conv2d');\n }\n conv2dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv2dDerInput');\n }\n conv2dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv2dDerFilter');\n }\n fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedDepthwiseConv2D');\n }\n depthwiseConv2D(input, filter, convInfo) {\n return notYetImplemented('depthwiseConv2D');\n }\n depthwiseConv2DDerInput(dy, filter, convInfo) {\n return notYetImplemented('depthwiseConv2DDerInput');\n }\n depthwiseConv2DDerFilter(x, dY, convInfo) {\n return notYetImplemented('depthwiseConv2DDerFilter');\n }\n conv3d(x, filter, convInfo) {\n return notYetImplemented('conv3d');\n }\n conv3dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv3dDerInput');\n }\n conv3dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv3dDerFilter');\n }\n maxPool(x, 
convInfo) {\n return notYetImplemented('maxPool');\n }\n maxPoolBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPoolBackprop');\n }\n avgPool(x, convInfo) {\n return notYetImplemented('avgPool');\n }\n avgPoolBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPoolBackprop');\n }\n avgPool3d(x, convInfo) {\n return notYetImplemented('avgPool3d');\n }\n avgPool3dBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPool3dBackprop');\n }\n maxPool3d(x, convInfo) {\n return notYetImplemented('maxPool3d');\n }\n maxPool3dBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPool3dBackprop');\n }\n reshape(x, shape) {\n return notYetImplemented('reshape');\n }\n cast(x, dtype) {\n return notYetImplemented('cast');\n }\n tile(x, reps) {\n return notYetImplemented('tile');\n }\n pad(x, paddings, constantValue) {\n return notYetImplemented('pad');\n }\n transpose(x, perm) {\n return notYetImplemented('transpose');\n }\n gather(x, indices, axis) {\n return notYetImplemented('gather');\n }\n gatherND(x, indices) {\n return notYetImplemented('gatherND');\n }\n scatterND(indices, updates, shape) {\n return notYetImplemented('scatterND');\n }\n batchToSpaceND(x, blockShape, crops) {\n return notYetImplemented('batchToSpaceND');\n }\n spaceToBatchND(x, blockShape, paddings) {\n return notYetImplemented('spaceToBatchND');\n }\n resizeBilinear(x, newHeight, newWidth, alignCorners) {\n return notYetImplemented('resizeBilinear');\n }\n resizeBilinearBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeBilinearBackprop');\n }\n resizeNearestNeighbor(x, newHEight, newWidth, alignCorners) {\n return notYetImplemented('resizeNearestNeighbor');\n }\n resizeNearestNeighborBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeNearestNeighborBackprop');\n }\n batchNorm(x, mean, variance, offset, scale, varianceEpsilon) {\n return notYetImplemented('batchNorm');\n }\n localResponseNormalization4D(x, radius, bias, alpha, beta) {\n return notYetImplemented('localResponseNormalization4D');\n }\n LRNGrad(dy, inputImage, outputImage, radius, bias, alpha, beta) {\n return notYetImplemented('LRNGrad');\n }\n multinomial(logits, normalized, numSamples, seed) {\n return notYetImplemented('multinomial');\n }\n oneHot(indices, depth, onValue, offValue) {\n return notYetImplemented('oneHot');\n }\n cumsum(x, axis, exclusive, reverse) {\n return notYetImplemented('cumsum');\n }\n nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {\n return notYetImplemented('nonMaxSuppression');\n }\n fft(x) {\n return notYetImplemented('fft');\n }\n ifft(x) {\n return notYetImplemented('ifft');\n }\n complex(real, imag) {\n return notYetImplemented('complex');\n }\n real(input) {\n return notYetImplemented('real');\n }\n imag(input) {\n return notYetImplemented('imag');\n }\n cropAndResize(image, boxes, boxIndex, cropSize, method, extrapolationValue) {\n return notYetImplemented('cropAndResize');\n }\n depthToSpace(x, blockSize, dataFormat) {\n return notYetImplemented('depthToSpace');\n }\n // Aligns with the \"SplitV\" kernel in TensorFlow.\n split(value, sizeSplits, axis) {\n return notYetImplemented('split');\n }\n sparseToDense(sparseIndices, sparseValues, outputShape, defaultValue) {\n return notYetImplemented('sparseToDense');\n }\n diag(x) {\n return notYetImplemented('diag');\n }\n fill(shape, value, dtype) {\n return notYetImplemented('fill');\n }\n onesLike(x) {\n return notYetImplemented('onesLike');\n }\n zerosLike(x) {\n return 
notYetImplemented('zerosLike');\n }\n linspace(start, stop, num) {\n return notYetImplemented('linspace');\n }\n dispose() {\n return notYetImplemented('dispose');\n }\n}\nfunction notYetImplemented(kernelName) {\n throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n//# sourceMappingURL=backend.js.map","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array) {\n let counter = array.length;\n let temp = 0;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n temp = array[counter];\n array[counter] = array[index];\n array[index] = temp;\n }\n}\n/** Clamps a value to a specified range. */\nexport function clamp(min, x, max) {\n return Math.max(min, Math.min(x, max));\n}\nexport function nearestLargerEven(val) {\n return val % 2 === 0 ? val : val + 1;\n}\nexport function sum(arr) {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a, b) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a, b) {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr, msg) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? 
msg : msg());\n }\n}\nexport function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {\n assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\nexport function assertNonNull(a) {\n assert(a != null, () => `The input to the tensor constructor must be a non-null value.`);\n}\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function flatten(arr, result = [], skipTypedArray = false) {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n }\n else {\n result.push(arr);\n }\n return result;\n}\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape) {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\nexport function isScalarShape(shape) {\n return shape.length === 0;\n}\nexport function arraysEqual(n1, n2) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\nexport function isInt(a) {\n return a % 1 === 0;\n}\nexport function tanh(x) {\n // tslint:disable-next-line:no-any\n if (Math.tanh != null) {\n // tslint:disable-next-line:no-any\n return Math.tanh(x);\n }\n if (x === Infinity) {\n return 1;\n }\n else if (x === -Infinity) {\n return -1;\n }\n else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\nexport function sizeToSquarishShape(size) {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n) {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\nexport function rightPad(a, size) {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\nexport function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n tryCount++;\n const nextBackoff = 
delayFn(tryCount);\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n tryFn();\n });\n}\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(shape, size) {\n let shapeProd = 1;\n let implicitIdx = -1;\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n }\n else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(`Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n }\n else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n if (shapeProd === 0) {\n throw Error(`Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(`The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\nexport function parseAxisParam(axis, shape) {\n const rank = shape.length;\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n // Check for valid range\n assert(axis.every(ax => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n // Check for only integers\n assert(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape, axis) {\n const newShape = [];\n const keptDims = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return { newShape, keptDims };\n}\nexport function getTypedArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function getArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else if (dtype === 'string') {\n values = new Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function checkConversionForErrors(vals, dtype) {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype) {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType, newType) {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\nexport function isTypedArray(a) {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\nexport function bytesPerElement(dtype) {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n }\n else if (dtype === 'complex64') {\n return 8;\n }\n else if (dtype === 'bool') {\n return 1;\n }\n else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr) {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n/** Returns true if the value is a string. 
*/\nexport function isString(value) {\n return typeof value === 'string' || value instanceof String;\n}\nexport function isBoolean(value) {\n return typeof value === 'boolean';\n}\nexport function isNumber(value) {\n return typeof value === 'number';\n}\nexport function inferDtype(values) {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n }\n else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n }\n else if (isNumber(values)) {\n return 'float32';\n }\n else if (isString(values)) {\n return 'string';\n }\n else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\nexport function isFunction(f) {\n return !!(f && f.constructor && f.call && f.apply);\n}\nexport function nearestDivisor(size, start) {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\nexport function computeStrides(shape) {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\nfunction createNestedArray(offset, shape, a) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0];\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n }\n else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a);\n }\n }\n return ret;\n}\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(shape, a) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}.`);\n }\n return createNestedArray(0, shape, a);\n}\nexport function makeOnesTypedArray(size, dtype) {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\nexport function makeZerosTypedArray(size, dtype) {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size);\n }\n else if (dtype === 'int32') {\n return new Int32Array(size);\n }\n else if (dtype === 'bool') {\n return new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(shape, dtype) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n }\n else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n }\n else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\nexport function assertNonNegativeIntegerDimensions(shape) {\n shape.forEach(dimSize => {\n assert(Number.isInteger(dimSize) && dimSize >= 0, 
() => `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(locs, rank, strides) {\n if (rank === 0) {\n return 0;\n }\n else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(index, rank, strides) {\n if (rank === 0) {\n return [];\n }\n else if (rank === 1) {\n return [index];\n }\n const locs = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n/**\n * This method asserts whether an object is a Promise instance.\n * @param object\n */\n// tslint:disable-next-line: no-any\nexport function isPromise(object) {\n // We chose to not use 'obj instanceOf Promise' for two reasons:\n // 1. It only reliably works for es6 Promise, not other Promise\n // implementations.\n // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch\n // the async calls, so it is possible the obj (patched) is comparing to a\n // pre-patched Promise.\n return object && object.then && typeof object.then === 'function';\n}\n//# sourceMappingURL=util_base.js.map","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { isPromise } from './util_base';\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n // tslint:disable-next-line: no-any\n constructor(global) {\n this.global = global;\n this.flags = {};\n this.flagRegistry = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n setPlatform(platformName, platform) {\n if (this.platform != null) {\n console.warn(`Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n registerFlag(flagName, evaluationFn, setHook) {\n this.flagRegistry[flagName] = { evaluationFn, setHook };\n // Override the flag value from the URL. 
This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n async getAsync(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n get(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n const flagValue = this.evaluateFlag(flagName);\n if (isPromise(flagValue)) {\n throw new Error(`Flag ${flagName} cannot be synchronously evaluated. ` +\n `Please use getAsync() instead.`);\n }\n this.flags[flagName] = flagValue;\n return this.flags[flagName];\n }\n getNumber(flagName) {\n return this.get(flagName);\n }\n getBool(flagName) {\n return this.get(flagName);\n }\n getFlags() {\n return this.flags;\n }\n // For backwards compatibility.\n get features() {\n return this.flags;\n }\n set(flagName, value) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n evaluateFlag(flagName) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n setFlags(flags) {\n this.flags = Object.assign({}, flags);\n }\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n populateURLFlags() {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n const urlParams = getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':');\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\nexport function getQueryParams(queryString) {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\nfunction decodeParam(params, name, value) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\nfunction parseValue(flagName, value) {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n }\n else if (`${+value}` === value) {\n return +value;\n }\n throw new Error(`Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\nexport let ENV = null;\nexport function setEnvironmentGlobal(environment) {\n ENV = environment;\n}\n//# sourceMappingURL=environment.js.map","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace;\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace() {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns;\n if (typeof (window) !== 'undefined') {\n ns = window;\n }\n else if (typeof (global) !== 'undefined') {\n ns = global;\n }\n else if (typeof (process) !== 'undefined') {\n ns = process;\n }\n else if (typeof (self) !== 'undefined') {\n ns = self;\n }\n else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n// tslint:disable-next-line:no-any\nfunction getGlobalMap() {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key, init) {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n }\n else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n//# sourceMappingURL=global_util.js.map","export const Abs = 'Abs';\nexport const Acos = 'Acos';\nexport const Acosh = 'Acosh';\nexport const Add = 'Add';\nexport const AddN = 'AddN';\nexport const All = 'All';\nexport const Any = 'Any';\nexport const ArgMax = 'ArgMax';\nexport const ArgMin = 'ArgMin';\nexport const Asin = 'Asin';\nexport const Asinh = 'Asinh';\nexport const Atan = 'Atan';\nexport const Atanh = 'Atanh';\nexport const Atan2 = 'Atan2';\nexport const AvgPool = 'AvgPool';\nexport const AvgPoolBackprop = 'AvgPoolBackprop';\nexport const AvgPool3D = 'AvgPool3D';\nexport const AvgPool3DBackprop = 'AvgPool3DBackprop';\nexport const BatchMatMul = 'BatchMatMul';\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport const BroadcastTo = 'BroadcastTo';\nexport const Cast = 'Cast';\nexport const Ceil = 'Ceil';\nexport const ClipByValue = 'ClipByValue';\nexport const Complex = 'Complex';\nexport const Concat = 'Concat';\nexport const Conv2D = 'Conv2D';\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport const Conv3D = 'Conv3D';\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport const Cos = 'Cos';\nexport const Cosh = 'Cosh';\nexport const Cumsum = 'Cumsum';\nexport const CropAndResize = 'CropAndResize';\nexport const DepthToSpace = 'DepthToSpace';\nexport const DepthwiseConv2dNative = 
'DepthwiseConv2dNative';\nexport const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';\nexport const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';\nexport const Diag = 'Diag';\nexport const Dilation2D = 'Dilation2D';\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport const Div = 'Div';\nexport const Elu = 'Elu';\nexport const EluGrad = 'EluGrad';\nexport const Erf = 'Erf';\nexport const Equal = 'Equal';\nexport const Exp = 'Exp';\nexport const Expm1 = 'Expm1';\nexport const FFT = 'FFT';\nexport const Fill = 'Fill';\nexport const FlipLeftRight = 'FlipLeftRight';\nexport const Floor = 'Floor';\nexport const FloorDiv = 'FloorDiv';\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport const GatherV2 = 'GatherV2';\nexport const GatherNd = 'GatherNd';\nexport const Greater = 'Greater';\nexport const GreaterEqual = 'GreaterEqual';\nexport const Identity = 'Identity';\nexport const IFFT = 'IFFT';\nexport const Imag = 'Imag';\nexport const IsFinite = 'IsFinite';\nexport const IsInf = 'IsInf';\nexport const IsNan = 'IsNan';\nexport const Less = 'Less';\nexport const LessEqual = 'LessEqual';\nexport const LinSpace = 'LinSpace';\nexport const Log = 'Log';\nexport const Log1p = 'Log1p';\nexport const LogicalAnd = 'LogicalAnd';\nexport const LogicalNot = 'LogicalNot';\nexport const LogicalOr = 'LogicalOr';\nexport const LogSoftmax = 'LogSoftmax';\nexport const LRN = 'LRN';\nexport const LRNBackprop = 'LRNBackprop';\nexport const Max = 'Max';\nexport const Maximum = 'Maximum';\nexport const MaxPool = 'MaxPool';\nexport const MaxPoolBackprop = 'MaxPoolBackprop';\nexport const MaxPool3D = 'MaxPool3D';\nexport const MaxPool3DBackprop = 'MaxPool3DBackprop';\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport const Mean = 'Mean';\nexport const Min = 'Min';\nexport const Minimum = 'Minimum';\nexport const MirrorPad = 'MirrorPad';\nexport const Mod = 'Mod';\nexport const Multiply = 'Multiply';\nexport const Negate = 'Negate';\nexport const NotEqual = 'NotEqual';\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport const OnesLike = 'OnesLike';\nexport const OneHot = 'OneHot';\nexport const PadV2 = 'PadV2';\nexport const Pool = 'Pool';\nexport const Pow = 'Pow';\nexport const Prelu = 'Prelu';\nexport const Prod = 'Prod';\nexport const Range = 'Range';\nexport const Real = 'Real';\nexport const Reciprocal = 'Reciprocal';\nexport const Relu = 'Relu';\nexport const Reshape = 'Reshape';\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport const ResizeBilinear = 'ResizeBilinear';\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport const Relu6 = 'Relu6';\nexport const Reverse = 'Reverse';\nexport const Round = 'Round';\nexport const Rsqrt = 'Rsqrt';\nexport const ScatterNd = 'ScatterNd';\nexport const SelectV2 = 'SelectV2';\nexport const Selu = 'Selu';\nexport const Slice = 'Slice';\nexport const Sin = 'Sin';\nexport const Sinh = 'Sinh';\nexport const Sign = 'Sign';\nexport const Sigmoid = 'Sigmoid';\nexport const Softplus = 'Softplus';\nexport const Sqrt = 'Sqrt';\nexport const Sum = 'Sum';\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport const SplitV = 'SplitV';\nexport const Softmax = 'Softmax';\nexport const 
SquaredDifference = 'SquaredDifference';\nexport const Square = 'Square';\nexport const Sub = 'Sub';\nexport const SparseToDense = 'SparseToDense';\nexport const StridedSlice = 'StridedSlice';\nexport const Tan = 'Tan';\nexport const Tanh = 'Tanh';\nexport const Tile = 'Tile';\nexport const TopK = 'TopK';\nexport const Transpose = 'Transpose';\nexport const Unique = 'Unique';\nexport const Unpack = 'Unpack';\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport const ZerosLike = 'ZerosLike';\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport const FromPixels = 'FromPixels';\nexport const RotateWithOffset = 'RotateWithOffset';\nexport const _FusedMatMul = '_FusedMatMul';\nexport const FusedConv2D = 'FusedConv2D';\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\n//# sourceMappingURL=kernel_names.js.map","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport { getGlobal } from './global_util';\nconst kernelRegistry = getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry = getGlobal('gradRegistry', () => new Map());\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName) {\n return gradRegistry.get(kernelName);\n}\nexport function getKernelsForBackend(backendName) {\n const it = kernelRegistry.entries();\n const result = [];\n while (true) {\n const { done, value } = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend,] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. 
Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config) {\n const { kernelName, backendName } = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n console.warn(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config) {\n const { kernelName } = config;\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n console.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n/** Removes the registered gradient from the global registry. */\nexport function unregisterGradient(kernelName) {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(`The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(registeredBackendName, newBackendName) {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName });\n registerKernel(newKernelConfig);\n });\n}\nfunction makeKey(kernelName, backendName) {\n return `${backendName}_${kernelName}`;\n}\n//# sourceMappingURL=kernel_registry.js.map","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport * as base from './util_base';\nexport * from './util_base';\n/**\n * Create typed array for scalar value. 
Used for storing in `DataStorage`.\n */\nexport function createScalarValue(value, dtype) {\n if (dtype === 'string') {\n return encodeString(value);\n }\n return toTypedArray([value], dtype);\n}\nfunction noConversionNeeded(a, dtype) {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\nexport function toTypedArray(a, dtype) {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = base.flatten(a);\n }\n if (env().getBool('DEBUG')) {\n base.checkConversionForErrors(a, dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a);\n }\n else if (dtype === 'int32') {\n return new Int32Array(a);\n }\n else if (dtype === 'bool') {\n const bool = new Uint8Array(a.length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round(a[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now() {\n return env().platform.now();\n}\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(path, requestInits) {\n return env().platform.fetch(path, requestInits);\n}\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n//# sourceMappingURL=util.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\nexport class Profiler {\n constructor(backendTimer, logger) {\n this.backendTimer = backendTimer;\n this.logger = logger;\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n profileKernel(kernelName, inputs, f) {\n let outputs;\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n const timer = this.backendTimer.time(holdResultWrapperFn);\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n logKernelProfile(kernelProfile) {\n const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile;\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);\n });\n });\n }\n}\nexport function checkComputationForErrors(vals, dtype, kernelName) {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\nexport class Logger {\n logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n console.log(`%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${inputShapesDescription}\\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');\n }\n}\n//# sourceMappingURL=profiler.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { computeStrides, isString, rightPad, sizeFromShape } from './util';\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\nexport function tensorToString(vals, shape, dtype, verbose) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\nfunction computeMaxSizePerColumn(vals, shape, dtype, strides) {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\nfunction valToString(val, pad, dtype) {\n let valStr;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n }\n else if (isString(val)) {\n valStr = `'${val}'`;\n }\n else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n }\n else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n return rightPad(valStr, pad);\n}\nfunction boolNumToString(v) {\n return v === 0 ? 'false' : 'true';\n}\nfunction subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0])];\n }\n return [vals[0].toString()];\n }\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n let firstVals = Array.from(vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals = dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\nfunction createComplexTuples(vals) {\n const complexTuples = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]]);\n }\n return complexTuples;\n}\n//# sourceMappingURL=tensor_format.js.map","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { tensorToString } from './tensor_format';\nimport * as util from './util';\nimport { computeStrides, toNestedArray } from './util';\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n constructor(shape, dtype, values) {\n this.dtype = dtype;\n this.shape = shape.slice();\n this.size = util.sizeFromShape(shape);\n if (values != null) {\n const n = values.length;\n util.assert(n === this.size, () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value, ...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n const index = this.locToIndex(locs);\n this.values[index] = value;\n }\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index];\n }\n locToIndex(locs) {\n if (this.rank === 0) {\n return 0;\n }\n else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n indexToLoc(index) {\n if (this.rank === 0) {\n return [];\n }\n else if (this.rank === 1) {\n return [index];\n }\n const locs = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor() {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype);\n }\n}\n// For tracking tensor creation and disposal.\nlet trackerFn = null;\n// Used by chaining methods to call into ops.\nlet opHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn = null;\n// This is here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn) {\n trackerFn = fn;\n}\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler) {\n opHandler = handler;\n}\n/**\n * Sets the deprecation warning function to be used by this file. This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn) {\n deprecationWarningFn = fn;\n}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n constructor(shape, dtype, dataId, id) {\n /** Whether this tensor has been globally kept. */\n this.kept = false;\n this.isDisposedInternal = false;\n this.shape = shape.slice();\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer() {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype, vals);\n }\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync() {\n return opHandler.buffer(this.shape, this.dtype, this.dataSync());\n }\n /**\n * Returns the tensor data as a nested array. 
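 *
 * For example, assuming the bundled `tf` namespace:
 *
 * ```js
 * const t = tf.tensor2d([[1, 2], [3, 4]]);
 * const nested = await t.array();  // [[1, 2], [3, 4]]
 * ```
 *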
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array() {\n const vals = await this.data();\n return toNestedArray(this.shape, vals);\n }\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync() {\n return toNestedArray(this.shape, this.dataSync());\n }\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data() {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data;\n try {\n return bytes.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync() {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return data.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /** Returns the underlying bytes of the tensor's data. */\n async bytes() {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data;\n }\n else {\n return new Uint8Array(data.buffer);\n }\n }\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose() {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n get isDisposed() {\n return this.isDisposedInternal;\n }\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false) {\n return opHandler.print(this, verbose);\n }\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone() {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n /**\n * Returns a human-readable description of the tensor. 
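 *
 * For example, assuming the bundled `tf` namespace (exact indentation of the
 * rendered string may vary):
 *
 * ```js
 * tf.tensor([1, 2, 3]).toString(true);
 * // 'Tensor\n  dtype: float32\n  rank: 1\n  shape: [3]\n  values:\n  [1, 2, 3]'
 * ```
 *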
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false) {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n cast(dtype) {\n this.throwIfDisposed();\n return opHandler.cast(this, dtype);\n }\n variable(trainable = true, name, dtype) {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype);\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n constructor(initialValue, trainable, name, tensorId) {\n super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.trainable = trainable;\n this.name = name;\n }\n /**\n * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue) {\n if (newValue.dtype !== this.dtype) {\n throw new Error(`dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(`shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n dispose() {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n//# sourceMappingURL=tensor.js.map","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport var Rank;\n(function (Rank) {\n Rank[\"R0\"] = \"R0\";\n Rank[\"R1\"] = \"R1\";\n Rank[\"R2\"] = \"R2\";\n Rank[\"R3\"] = \"R3\";\n Rank[\"R4\"] = \"R4\";\n Rank[\"R5\"] = \"R5\";\n Rank[\"R6\"] = \"R6\";\n})(Rank || (Rank = {}));\n// Looks for upcasting types. 
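//
// For instance, per the maps defined below:
//
//   upcastType('int32', 'float32');   // 'float32'
//   upcastType('bool', 'int32');      // 'int32'
//   upcastType('bool', 'complex64');  // 'complex64'
//   sumOutType('bool');               // 'int32' (bool sums accumulate as int32)
//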
Used, for example, in operations with mixed dtype\n// inputs.\nvar UpcastInt32AndMap;\n(function (UpcastInt32AndMap) {\n UpcastInt32AndMap[\"float32\"] = \"float32\";\n UpcastInt32AndMap[\"int32\"] = \"int32\";\n UpcastInt32AndMap[\"bool\"] = \"int32\";\n UpcastInt32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastInt32AndMap || (UpcastInt32AndMap = {}));\nvar UpcastBoolAndMap;\n(function (UpcastBoolAndMap) {\n UpcastBoolAndMap[\"float32\"] = \"float32\";\n UpcastBoolAndMap[\"int32\"] = \"int32\";\n UpcastBoolAndMap[\"bool\"] = \"bool\";\n UpcastBoolAndMap[\"complex64\"] = \"complex64\";\n})(UpcastBoolAndMap || (UpcastBoolAndMap = {}));\nvar UpcastFloat32AndMap;\n(function (UpcastFloat32AndMap) {\n UpcastFloat32AndMap[\"float32\"] = \"float32\";\n UpcastFloat32AndMap[\"int32\"] = \"float32\";\n UpcastFloat32AndMap[\"bool\"] = \"float32\";\n UpcastFloat32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));\nvar UpcastComplex64AndMap;\n(function (UpcastComplex64AndMap) {\n UpcastComplex64AndMap[\"float32\"] = \"complex64\";\n UpcastComplex64AndMap[\"int32\"] = \"complex64\";\n UpcastComplex64AndMap[\"bool\"] = \"complex64\";\n UpcastComplex64AndMap[\"complex64\"] = \"complex64\";\n})(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\nexport function upcastType(typeA, typeB) {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n/** Returns the output type after summation. */\nexport function sumOutType(type) {\n return upcastType(type, 'int32');\n}\n//# sourceMappingURL=types.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { Tensor } from './tensor';\nimport { upcastType } from './types';\nimport { assert } from './util';\nexport function makeTypesMatch(a, b) {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\nexport function assertTypesMatch(a, b) {\n assert(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\nexport function isTensorInList(tensor, tensorList) {\n return tensorList.some(x => x.id === tensor.id);\n}\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. 
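 *
 * For example, given tensors `a` and `b` (a sketch; any nesting of arrays
 * and plain objects works the same way):
 *
 * ```js
 * getTensorsInContainer({first: a, rest: [b, 'not a tensor', null]});  // [a, b]
 * getTensorsInContainer(42);  // [] (primitives are not walked)
 * ```
 *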
If the\n * argument is simply a `Tensor`, a list containing that `Tensor` is\n * returned. If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result) {\n const list = [];\n const seen = new Set();\n walkTensorContainer(result, list, seen);\n return list;\n}\nfunction walkTensorContainer(container, list, seen) {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys also works for arrays.\n const iterable = container;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n// tslint:disable-next-line:no-any\nfunction isIterable(obj) {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n//# sourceMappingURL=tensor_util.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { KernelBackend } from './backends/backend';\nimport { Environment, setEnvironmentGlobal } from './environment';\nimport { getGlobalNamespace } from './global_util';\nimport { Add, Cast } from './kernel_names';\nimport { getGradient, getKernel, getKernelsForBackend } from './kernel_registry';\nimport { Profiler } from './profiler';\nimport { backpropagateGradients, getFilteredNodesXToY } from './tape';\nimport { setTensorTracker, Tensor, Variable } from './tensor';\nimport { getTensorsInContainer } from './tensor_util';\nimport * as util from './util';\nimport { bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape } from './util';\nclass EngineState {\n constructor() {\n // Public since optimizers will use it.\n this.registeredVariables = {};\n this.nextTapeNodeId = 0;\n this.numBytes = 0;\n this.numTensors = 0;\n this.numStringTensors = 0;\n this.numDataBuffers = 0;\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n this.gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n this.kernelDepth = 0;\n this.scopeStack = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. 
We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n this.numDataMovesStack = [];\n this.nextScopeId = 0;\n this.tensorInfo = new WeakMap();\n this.profiling = false;\n this.activeProfile = { newBytes: 0, newTensors: 0, peakBytes: 0, kernels: [], result: null };\n }\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\nexport class Engine {\n constructor(ENV) {\n this.ENV = ENV;\n this.registry = {};\n this.registryFactory = {};\n this.pendingBackendInitId = 0;\n this.state = new EngineState();\n }\n async ready() {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => { });\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n get backend() {\n if (this.pendingBackendInit != null) {\n throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const { name, asyncInit } = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(`The highest priority backend '${name}' has not yet been ` +\n `initialized. Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n backendNames() {\n return Object.keys(this.registryFactory);\n }\n findBackend(backendName) {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const { asyncInit } = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n }\n else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n findBackendFactory(backendName) {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n registerBackend(backendName, factory, priority = 1) {\n if (backendName in this.registryFactory) {\n console.warn(`${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = { factory, priority };\n return true;\n }\n async setBackend(backendName) {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const { success, asyncInit } = this.initializeBackend(backendName);\n const result = asyncInit ? 
await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n return true;\n }\n setupRegisteredKernels() {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n disposeRegisteredKernels(backendName) {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend succeeded. Throws an error if\n * there is no backend in the factory registry.\n */\n initializeBackend(backendName) {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(`Cannot initialize backend ${backendName}, no registration found.`);\n }\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend)\n && typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success = backend\n .then(backendInstance => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return { success, asyncInit: true };\n }\n else {\n this.registry[backendName] = backend;\n return { success: true, asyncInit: false };\n }\n }\n catch (err) {\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return { success: false, asyncInit: false };\n }\n }\n removeBackend(backendName) {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. 
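//
// A usage sketch of the registration/initialization flow above, assuming the
// bundled `tf` namespace; 'my-backend' and makeMyBackend() are hypothetical:
//
//   tf.registerBackend('my-backend', () => makeMyBackend(), 2 /* priority */);
//   await tf.ready();                  // initializes the best available backend
//   await tf.setBackend('my-backend'); // resolves to false if init failed
//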
Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n delete this.registryFactory[backendName];\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n getSortedBackends() {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a, b) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n initializeBackendsAndReturnBest() {\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const { success, asyncInit } = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return { name: backendName, asyncInit };\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n moveData(backend, dataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n tidy(nameOrFn, fn) {\n let name = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n }\n else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error('When calling with two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error('When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result;\n return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n scopedRun(start, end, f) {\n start();\n try {\n const res = f();\n end();\n return res;\n }\n catch (ex) {\n end();\n throw ex;\n }\n }\n nextTensorId() {\n return Engine.nextTensorId++;\n }\n nextVariableId() {\n return Engine.nextVariableId++;\n }\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. 
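 *
 * A usage sketch of `tidy()` above, assuming the bundled `tf` namespace:
 *
 * ```js
 * const y = tf.tidy('square-sum', () => {
 *   const sq = tf.tensor([1, 2, 3]).square();  // disposed when the scope ends
 *   return sq.sum();                           // returned, tracked by parent
 * });
 * // Returning a Promise from tidy() is an error, per the check above.
 * ```
 *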
It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n *\n * This method will go away once all kernels are modularized since we won't\n * need to turn off the tape inside runKernel().\n */\n clone(x) {\n const y = this.makeTensorFromDataId(x.dataId, x.shape, x.dtype);\n const inputs = { x };\n const grad = (dy) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = { x: dy };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast(dy, dtype), gradInputs, null /* grad */, Cast, attrs);\n }\n });\n const saved = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(kernelName, inputs, attrs, inputsToSave, outputsToSave) {\n const forwardFunc = null;\n const backwardsFunc = null;\n // Call runKernelFunc as a stop-gap until we modularize all kernels.\n // Once we modularize all kernels, we will remove the existing\n // `runKernelFunc`.\n return this.runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave);\n }\n shouldCheckForMemLeaks() {\n return this.ENV.getBool('IS_TEST');\n }\n checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) {\n const numDataIdsAfter = this.backend.numDataIds();\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(`Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n /**\n * @deprecated Use `runKernel` for newly added kernels. Keep using this method\n * only for kernels that are not yet fully modularized.\n */\n runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave) {\n let outputs;\n let saved = [];\n const isTapeOn = this.isTapeOn();\n if (kernelName == null) {\n kernelName =\n this.state.activeScope != null ? 
this.state.activeScope.name : '';\n }\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n let kernelFunc;\n const kernel = getKernel(kernelName, this.backendName);\n let out;\n if (kernel != null) {\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({ inputs, attrs, backend: this.backend });\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n const outTensors = outInfos.map(({ dataId, shape, dtype }) => this.makeTensorFromDataId(dataId, shape, dtype));\n // Save the inputs and outputs.\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (isTapeOn) {\n let tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors);\n if (tensorsToSave == null) {\n // Fallback for ops that call runKernelFunc and pass in\n // inputsToSave and outputsToSave. Currently this is the set of ops\n // with kernel support in the WASM backend. Once those ops and\n // respective gradients are modularised we can remove this path.\n if (outputsToSave == null) {\n outputsToSave = [];\n }\n const outsToSave = outTensors.filter((_, i) => outputsToSave[i]);\n tensorsToSave = (inputsToSave || []).slice().concat(outsToSave);\n }\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n }\n else {\n const saveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]);\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n // Stop recording to a tape when running a kernel.\n let kernelProfile;\n this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n }\n else {\n kernelProfile = this.profiler.profileKernel(kernelName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n if (isTapeOn) {\n this.addTapeNode(kernelName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? 
outputs : outputs[0]);\n }\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n saveTensorsForBackwardMode(tensors) {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * Returns undefined if there is no registered gradient for this kernel in the\n * gradient registry.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n getTensorsForGradient(kernelName, inputs, outputs) {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave = gradConfig.inputsToSave || [];\n const outputsToSave = gradConfig.outputsToSave || [];\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave;\n if (gradConfig.saveAllInputs) {\n util.assert(Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.');\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n }\n else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]);\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // TODO(yassogba) throw exception here once all runKernelFunc calls with\n // inputsToSave/outputsToSave are removed\n return null;\n }\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(values, shape, dtype, backend) {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = values.map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. 
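 *
 * A sketch of a gradient config consumed by `getTensorsForGradient` above;
 * the 'MySquare' kernel name is hypothetical:
 *
 * ```js
 * registerGradient({
 *   kernelName: 'MySquare',
 *   inputsToSave: ['x'],   // saved from forward mode
 *   outputsToSave: [],     // this gradient needs no outputs
 *   gradFunc: (dy, saved) => {
 *     const [x] = saved;
 *     return {x: () => dy.mul(x.mul(2))};  // d(x^2)/dx = 2x
 *   }
 * });
 * ```
 *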
It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(dataId, shape, dtype, backend) {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n return t;\n }\n makeVariable(initialValue, trainable = true, name, dtype) {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n incRef(a, backend) {\n const refCount = this.state.tensorInfo.has(a.dataId) ?\n this.state.tensorInfo.get(a.dataId).refCount :\n 0;\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n if (refCount === 0) {\n this.state.numDataBuffers++;\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes,\n refCount: 0\n });\n this.state.numBytes += bytes;\n }\n this.state.tensorInfo.get(a.dataId).refCount++;\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n disposeTensor(a) {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n const refCount = info.refCount;\n if (refCount <= 1) {\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64') {\n this.state.numBytes -= info.bytes;\n }\n this.state.numDataBuffers--;\n info.backend.disposeData(a.dataId);\n this.state.tensorInfo.delete(a.dataId);\n }\n else {\n this.state.tensorInfo.get(a.dataId).refCount--;\n }\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. 
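//
// For example, clones share a data id, so the backend buffer is released only
// when the last reference is disposed:
//
//   const a = tf.tensor([1, 2]);
//   const b = a.clone();  // same dataId; refCount becomes 2
//   a.dispose();          // refCount 1, buffer kept
//   b.dispose();          // refCount 0, backend data disposed
//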
Creating a stack trace is too expensive\n // to do unconditionally.\n }\n disposeVariables() {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n disposeVariable(v) {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n memory() {\n const info = this.backend.memory();\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push('Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n async profile(query) {\n this.state.profiling = true;\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n this.state.profiling = false;\n this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n isTapeOn() {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) {\n const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved };\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n keep(result) {\n result.kept = true;\n return result;\n }\n startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n endTape() {\n this.state.gradientDepth--;\n }\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name) {\n const scopeInfo = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n /**\n * End a scope. 
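 *
 * A usage sketch of `profile()` above, assuming the bundled `tf` namespace:
 *
 * ```js
 * const info = await tf.profile(() => tf.tensor([1, 2, 3]).square());
 * console.log(info.newBytes, info.newTensors, info.peakBytes);
 * console.log(info.kernels.map(k => k.name));  // e.g. ['Square']
 * ```
 *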
Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id));\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if it was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(f, xs, dy, allowNoGradients = false) {\n util.assert(xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f));\n util.assert(y instanceof Tensor, () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n return this.tidy('backward', () => {\n const accumulatedGradientMap = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(accumulatedGradientMap, filteredTape, \n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f), \n // Pass an add function to avoid a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return { value: y, grads };\n });\n }\n customGrad(f) {\n util.assert(util.isFunction(f), () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs) => {\n util.assert(inputs.every(t => t instanceof Tensor), () => 'The args passed in customGrad(f)(x1, x2,...) 
must all be ' +\n 'tensors');\n let res;\n const inputMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n return this.runKernelFunc((_, save) => {\n res = f(...[...inputs, save]);\n util.assert(res.value instanceof Tensor, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(util.isFunction(res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n }, inputMap, (dy, saved) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads = Array.isArray(gradRes) ? gradRes : [gradRes];\n util.assert(grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(grads.every(t => t instanceof Tensor), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n });\n };\n }\n readSync(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n async time(query) {\n const start = now();\n const timingInfo = await this.backend.time(query);\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n track(result) {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n return result;\n }\n get registeredVariables() {\n return this.state.registeredVariables;\n }\n /**\n * Resets the engine state. 
Removes all backends but does not remove\n * registered backend factories.\n */\n reset() {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\nEngine.nextTensorId = 0;\nEngine.nextVariableId = 0;\nfunction ones(shape) {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\nexport function getOrMakeEngine() {\n const ns = getGlobalNamespace();\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\nexport const ENGINE = getOrMakeEngine();\n/**\n * An implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a, b) {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = { a, b };\n return ENGINE.runKernelFunc((backend, save) => {\n const res = backend.add(a, b);\n save([a, b]);\n return res;\n }, inputs, null /* gradient */, Add);\n}\n//# sourceMappingURL=engine.js.map","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(tape, xs, y) {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX = {};\n const nodesFromX = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n if (anyInputFromX) {\n break;\n }\n }\n }\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY = {};\n 
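// Seed the backward walk: y trivially leads to itself.\n 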
tensorsLeadToY[y.id] = true;\n const nodesToY = {};\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n // Return the paths that come from x and lead to y.\n const filteredTape = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n prunedNode.outputs = node.outputs;\n filteredTape.push(prunedNode);\n }\n }\n return filteredTape;\n}\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n const dys = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n }\n else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n if (node.gradient == null) {\n throw new Error(`Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(`Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n }\n else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n//# sourceMappingURL=tape.js.map","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined() {\n return typeof navigator !== 'undefined' && navigator != null;\n}\nexport function isMobile() {\n if (_isNavigatorDefined()) {\n // tslint:disable-next-line:no-any\n const a = navigator.userAgent || navigator.vendor || window.opera;\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\nexport function isBrowser() {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n//# sourceMappingURL=device_util.js.map","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\nimport * as device_util from './device_util';\nimport { env } from './environment';\nconst ENV = env();\n/**\n * This file contains environment-related flag registrations.\n */\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn('Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n/** Whether we are in a browser (as opposed to, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n/** Whether we are in a node.js (as opposed to a browser) environment. */\nENV.registerFlag('IS_NODE', () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n/** Whether this browser is Chrome. */\nENV.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n//# sourceMappingURL=flags.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from './engine';\nimport { env } from './environment';\nimport { Tensor } from './tensor';\nimport { assert, flatten, inferDtype, isTypedArray, toTypedArray } from './util';\nexport function inferShape(val, dtype) {\n let firstElem = val;\n if (isTypedArray(val)) {\n return dtype === 'string' ? 
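// a 'string' TypedArray encodes a single string (scalar, shape []);\n // any other TypedArray is a flat numeric vector of length val.length\n 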
[] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape = [];\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n return shape;\n}\nfunction deepAssertShapeConsistency(val, shape, indices) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\nfunction assertDtype(expectedDtype, actualDType, argName, functionName) {\n if (expectedDtype == null) {\n return;\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(`Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\nexport function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 'null' : x.constructor.name;\n throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype) :\n flatten(x, [], skipTypedArray);\n return ENGINE.makeTensor(values, inferredShape, inferredDtype);\n}\nexport function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') {\n if (!Array.isArray(arg)) {\n throw new Error(`Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg;\n return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName), parseAsDtype);\n}\n//# sourceMappingURL=tensor_util_env.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { isPromise } from '../util';\nexport const OP_SCOPE_SUFFIX = '__op';\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f) {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(`Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n let opName = keys[0];\n const fn = f[opName];\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n // tslint:disable-next-line:no-any\n const f2 = (...args) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (isPromise(result)) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n }\n catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', { value: opName, configurable: true });\n // tslint:disable-next-line:no-any\n return f2;\n}\n//# sourceMappingURL=operation.js.map","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Complex } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real, imag) {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n const forward = (backend) => {\n return backend.complex($real, $imag);\n };\n const inputs = { real: $real, imag: $imag };\n return ENGINE.runKernelFunc(forward, inputs, null /* gradient */, Complex);\n}\nexport const complex = op({ complex_ });\n//# sourceMappingURL=complex.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray } from '../util';\n/** This is shared code across all tensor creation methods. */\nexport function makeTensor(values, shape, inferredShape, dtype) {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(`Cannot construct a complex64 tensor directly. 
` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values];\n }\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values, [], true);\n return ENGINE.makeTensor(values, shape, dtype);\n}\n//# sourceMappingURL=tensor_ops_util.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(values, shape, dtype) {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/* Type definitions for exporting and importing of models. */\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n//# sourceMappingURL=types.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { complex } from '../ops/complex';\nimport { tensor } from '../ops/tensor';\nimport { sizeFromShape } from '../util';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(tensors, group) {\n // TODO(adarob, cais): Support quantization.\n const specs = [];\n const dataPromises = [];\n const names = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
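// `tensors` is either an array of {name, tensor} entries or a map from\n // name to tensor, mirroring how `names` was computed above\n 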
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec = { name, shape: t.shape, dtype: t.dtype };\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async (resolve) => {\n const vals = await t.bytes();\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n }\n else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n const tensorValues = await Promise.all(dataPromises);\n return { data: concatenateTypedArrays(tensorValues), specs };\n}\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(buffer, specs) {\n // TODO(adarob, cais): Support quantization.\n const out = {};\n let float16Decode;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values;\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n }\n else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n }\n else {\n throw new Error(`Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n }\n else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray);\n }\n else {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n }\n else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n }\n else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n values.push(bytes);\n offset += byteLength;\n }\n }\n else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n }\n else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n realTensor.dispose();\n imageTensor.dispose();\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs) {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n let totalByteLength = 0;\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. 
`normalizedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfies this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs = [];\n xs.forEach((x) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :\n new x.constructor(x));\n if (!(x instanceof Float32Array || x instanceof Int32Array ||\n x instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n return y.buffer;\n}\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str) {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer) {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str) {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers) {\n if (buffers.length === 1) {\n return buffers[0];\n }\n let totalByteLength = 0;\n buffers.forEach((buffer) => {\n totalByteLength += buffer.byteLength;\n });\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path) {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = path.split(SEPARATOR);\n return items[items.length - 1];\n}\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function 
getModelArtifactsInfoForJSON(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n/**\n * Computes mantissa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable() {\n const convertMantissa = (i) => {\n let m = i << 13;\n let e = 0;\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n return m | e;\n };\n const mantisaTable = new Uint32Array(2048);\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n return mantisaTable;\n}\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable() {\n const exponentTable = new Uint32Array(64);\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n return exponentTable;\n}\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 offset values.\n */\nfunction computeFloat16OffsetTable() {\n const offsetTable = new Uint32Array(64);\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n return offsetTable;\n}\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder() {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n return (quantizedArray) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n//# sourceMappingURL=io_utils.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport class IORouterRegistry {\n constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n static getInstance() {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url) {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url, loadOptions) {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n static getHandlers(url, handlerType, loadOptions) {\n const validHandlers = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\nexport const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions);\n//# sourceMappingURL=router_registry.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase() {\n const idbFactory = getIndexedDBFactory();\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\nfunction getIndexedDBFactory() {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('Failed to obtain IndexedDB factory because the current environment ' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow = typeof window === 'undefined' ? 
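// in a web worker there is no window; fall back to the worker's global self\n 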
self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error('The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\nfunction setUpDatabase(openRequest) {\n const db = openRequest.result;\n db.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' });\n db.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' });\n}\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB {\n constructor(modelPath) {\n this.indexedDB = getIndexedDBFactory();\n if (modelPath == null || !modelPath) {\n throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n async save(modelArtifacts) {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserIndexedDB.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n return this.databaseAction(this.modelPath, modelArtifacts);\n }\n async load() {\n return this.databaseAction(this.modelPath);\n }\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n databaseAction(modelPath, modelArtifacts) {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n }\n else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n }\n else {\n // Put model into object store.\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo });\n let modelTx;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => 
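// both the info and model entries are now written; report the saved metadata\n 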
resolve({ modelArtifactsInfo });\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\nBrowserIndexedDB.URL_SCHEME = 'indexeddb://';\nexport const indexedDBRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel');\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath) {\n return new BrowserIndexedDB(modelPath);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\nexport class BrowserIndexedDBManager {\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n async listModels() {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = store.getAll();\n getAllInfoRequest.onsuccess = () => {\n const out = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n 
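// creates the object stores on first open (or on a version upgrade)\n 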
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const getInfoRequest = infoStore.get(path);\n let modelTx;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n }\n else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error => reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n//# sourceMappingURL=indexed_db.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts() {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error('purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if 
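// only keys under the tensorflowjs models prefix belong to saved models\n 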
(key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\nfunction getModelKeys(path) {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage {\n constructor(modelPath) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n if (modelPath == null || !modelPath) {\n throw new Error('For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n try {\n this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(modelArtifacts.weightData));\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify({\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata\n }));\n return { modelArtifactsInfo };\n }\n catch (err) {\n // If saving failed, clean up all items saved so far.\n this.LS.removeItem(this.keys.info);\n this.LS.removeItem(this.keys.topology);\n this.LS.removeItem(this.keys.weightSpecs);\n this.LS.removeItem(this.keys.weightData);\n 
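// Removing all five keys (including model_metadata below) ensures that a\n // failed save never leaves partial artifacts behind in local storage.\n 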
this.LS.removeItem(this.keys.modelMetadata);\n throw new Error(`Failed to save model '${this.modelPath}' to local storage. ` +\n `A likely cause is that the size quota was exceeded: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load() {\n const info = JSON.parse(this.LS.getItem(this.keys.info));\n if (info == null) {\n throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);\n }\n if (info.modelTopologyType !== 'JSON') {\n throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n const out = {};\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(`In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString);\n out.format = metadata['format'];\n out.generatedBy = metadata['generatedBy'];\n out.convertedBy = metadata['convertedBy'];\n out.userDefinedMetadata = metadata['userDefinedMetadata'];\n }\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(`In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n return out;\n }\n}\nBrowserLocalStorage.URL_SCHEME = 'localstorage://';\nexport const localStorageRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n
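// A sketch of the key layout produced by `getModelKeys` above, for the\n// hypothetical model path 'my/model' (all keys share the PATH_PREFIX\n// 'tensorflowjs_models'):\n//\n// tensorflowjs_models/my/model/info\n// tensorflowjs_models/my/model/model_topology\n// tensorflowjs_models/my/model/weight_specs\n// tensorflowjs_models/my/model/weight_data\n// tensorflowjs_models/my/model/model_metadata\n//\n// `getModelPathFromKey` inverts the mapping, e.g.\n// getModelPathFromKey('tensorflowjs_models/my/model/info') === 'my/model'.\n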
/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, five items are saved to local storage.\n * - `${PATH_PREFIX}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_PREFIX}/${modelPath}/model_topology`: Model topology. For Keras-\n * style models, this is a stringified JSON.\n * - `${PATH_PREFIX}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_PREFIX}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n * - `${PATH_PREFIX}/${modelPath}/model_metadata`: Auxiliary metadata of the\n * model, such as format, generatedBy, convertedBy and userDefinedMetadata.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceeds the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath) {\n return new BrowserLocalStorage(modelPath);\n}\nexport class BrowserLocalStorageManager {\n constructor() {\n assert(env().getBool('IS_BROWSER'), () => 'Current environment is not a web browser');\n assert(typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n async listModels() {\n const out = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key));\n }\n }\n return out;\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info));\n this.LS.removeItem(keys.info);\n this.LS.removeItem(keys.topology);\n this.LS.removeItem(keys.weightSpecs);\n this.LS.removeItem(keys.weightData);\n this.LS.removeItem(keys.modelMetadata);\n return info;\n }\n}\n//# sourceMappingURL=local_storage.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Removing a model from a registered storage medium, by URL string.\n * - Moving or copying a model from one path to another in the same medium, or\n * from one medium to another, by URL strings.\n */\nimport { assert } from '../util';\nimport { IORouterRegistry } from './router_registry';\nconst URL_SCHEME_SUFFIX = '://';\nexport class ModelStoreManagerRegistry {\n constructor() {\n this.managers = {};\n }\n static getInstance() {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n /**\n * Register a model store manager for a URL scheme.\n *\n * @param scheme The scheme under which the manager is registered, e.g.,\n * 'localstorage'. A trailing '://' suffix, if present, is stripped.\n * @param manager An instance of `ModelStoreManager` that handles the scheme.\n */\n static registerManager(scheme, manager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`);\n registry.managers[scheme] = manager;\n }\n static getManager(scheme) {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n static getSchemes() {\n return Object.keys(this.getInstance().managers);\n }\n}\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url) {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(`The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\nasync function cloneModelInternal(sourceURL, destURL, deleteSource = false) {\n assert(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`);\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`);\n assert(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handler is found for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(saveHandlers.length < 2, () => `Copying failed because more than one (${saveHandlers.length}) ` +\n `save handler is found for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(destURL).scheme;\n const modelArtifacts = await loadHandler.load();\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n const saveResult = await saveHandler.save(modelArtifacts);\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n return saveResult.modelArtifactsInfo;\n}\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info includes the type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels() {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out = {};\n for (const scheme of schemes) {\n const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n/**\n * Remove a model specified by URL from a registered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `url`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url) {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `sourceURL` and `destURL` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(sourceURL, destURL) {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the moved model (if and only if moving\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `sourceURL` and `destURL` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(sourceURL, destURL) {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\nexport { moveModel, copyModel, removeModel, listModels };\n//# sourceMappingURL=model_management.js.map","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { BrowserIndexedDB, BrowserIndexedDBManager } from '../io/indexed_db';\nimport { BrowserLocalStorage, BrowserLocalStorageManager } from '../io/local_storage';\nimport { ModelStoreManagerRegistry } from '../io/model_management';\nexport class PlatformBrowser {\n fetch(path, init) {\n return fetch(path, init);\n }\n now() {\n return performance.now();\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n }\n catch (err) {\n }\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n }\n catch (err) {\n }\n}\n//# sourceMappingURL=platform_browser.js.map","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\nlet systemFetch;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch() {\n return systemFetch;\n}\nexport class PlatformNode {\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n fetch(path, requestInits) {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n now() {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n//# sourceMappingURL=platform_node.js.map","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorBuffer } from '../tensor';\nimport * as util from '../util';\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. 
Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(shape, dtype = 'float32', values) {\n dtype = dtype || 'float32';\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n//# sourceMappingURL=buffer.js.map","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Cast } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x, dtype) {\n const $x = convertToTensor(x, 'x', 'cast');\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n const inputs = { x: $x };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast($x, dtype), inputs, null /* grad */, Cast, attrs);\n}\nexport const cast = op({ cast_ });\n//# sourceMappingURL=cast.js.map","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Identity } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x) {\n const $x = convertToTensor(x, 'x', 'clone', null);\n const forward = () => ENGINE.makeTensorFromDataId($x.dataId, $x.shape, $x.dtype);\n const inputs = { x: $x };\n // Note this op is called tf.identity in python. Hence the kernel name used\n // here.\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Identity);\n}\nexport const clone = op({ clone_ });\n//# sourceMappingURL=clone.js.map","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x, verbose = false) {\n console.log(x.toString(verbose));\n}\n//# sourceMappingURL=print.js.map","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Required side effectful code for tfjs-core\n// Set up Engine and ENV\nimport { getOrMakeEngine } from './engine';\ngetOrMakeEngine();\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n// Set up OpHandler\nimport { buffer } from './ops/buffer';\nimport { cast } from './ops/cast';\nimport { clone } from './ops/clone';\nimport { print } from './ops/print';\nimport { setOpHandler } from './tensor';\nconst opHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n//# sourceMappingURL=base_side_effects.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { basename, concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\nfunction defer(f) {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\nexport class BrowserDownloads {\n constructor(fileNamePrefix) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error('browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n async save(modelArtifacts) {\n if (typeof (document) === 'undefined') {\n throw new Error('Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const 
weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const weightsManifest = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n weightsManifest\n };\n const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: 'application/json' }));\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :\n this.jsonAnchor;\n jsonAnchor.download = this.modelTopologyFileName;\n jsonAnchor.href = modelTopologyAndWeightManifestURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) };\n }\n }\n}\nBrowserDownloads.URL_SCHEME = 'downloads://';\nclass BrowserFiles {\n constructor(files) {\n if (files == null || files.length < 1) {\n throw new Error(`When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.files = files;\n }\n async load() {\n const jsonFile = this.files[0];\n const weightFiles = this.files.slice(1);\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse(event.target.result);\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));\n return;\n }\n if (weightFiles.length === 0) {\n resolve({ modelTopology });\n }\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${jsonFile.name}`));\n return;\n }\n let pathToFile;\n try {\n pathToFile =\n this.checkManifestAndWeightFiles(weightsManifest, weightFiles);\n }\n catch (err) {\n reject(err);\n return;\n }\n const weightSpecs = [];\n const paths = [];\n const perFileBuffers = [];\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n paths.push(path);\n perFileBuffers.push(null);\n });\n weightSpecs.push(...weightsGroup.weights);\n });\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const weightData = event.target.result;\n const index = paths.indexOf(path);\n 
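// Store this file's ArrayBuffer at its position in the manifest order;\n // once no null slots remain, all weight files have loaded and the\n // concatenated buffer can be resolved below.\n 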
perFileBuffers[index] = weightData;\n if (perFileBuffers.indexOf(null) === -1) {\n resolve({\n modelTopology,\n weightSpecs,\n weightData: concatenateArrayBuffers(perFileBuffers),\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy,\n userDefinedMetadata: modelJSON.userDefinedMetadata\n });\n }\n };\n weightFileReader.onerror = error => reject(new Error(`Failed to read weights data from file of path '${path}'.`));\n weightFileReader.readAsArrayBuffer(pathToFile[path]);\n });\n });\n };\n jsonReader.onerror = error => reject(new Error(`Failed to read model topology and weights manifest JSON ` +\n `from file '${jsonFile.name}'. BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`));\n jsonReader.readAsText(jsonFile);\n });\n }\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n checkManifestAndWeightFiles(manifest, files) {\n const basenames = [];\n const fileNames = files.map(file => basename(file.name));\n const pathToFile = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(`Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);\n }\n else {\n pathToFile[path] = files[fileNames.indexOf(pathBasename)];\n }\n });\n }\n if (basenames.length !== files.length) {\n throw new Error(`Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${files.length}).`);\n }\n return pathToFile;\n }\n}\nexport const browserDownloadsRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used with model exporting methods\n * such as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. `null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. 
A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model') {\n return new BrowserDownloads(fileNamePrefix);\n}\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (`<input type=\"file\">`)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, one or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files) {\n return new BrowserFiles(files);\n}\n//# sourceMappingURL=browser_files.js.map","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { assert } from '../util';\n
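// A minimal usage sketch of `monitorPromisesProgress` below (assuming only\n// that `setTimeout` exists; `delay` is an illustrative helper, not part of\n// this module):\n//\n// const delay = ms => new Promise(res => setTimeout(res, ms));\n// const tasks = [delay(100), delay(200), delay(300)];\n// await monitorPromisesProgress(tasks, f => console.log('progress:', f));\n// // -> logs progress: 0.333..., 0.666..., 1 as the promises settle.\n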
/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolves.\n * @param startFraction Optional fraction start. Defaults to 0.\n * @param endFraction Optional fraction end. Defaults to 1.\n */\nexport function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n const registerMonitor = (promise) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n function checkPromises(promises) {\n assert(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a non-empty array');\n }\n function checkFraction(startFraction, endFraction) {\n assert(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n return Promise.all(promises.map(registerMonitor));\n}\n//# sourceMappingURL=progress.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\nimport * as util from '../util';\nimport { decodeWeights } from './io_utils';\nimport { monitorPromisesProgress } from './progress';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests to, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n const fetchFunc = loadOptions.fetchFunc == null ? 
env().platform.fetch :\n loadOptions.fetchFunc;\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }));\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction);\n const bufferPromises = responses.map(response => response.arrayBuffer());\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction);\n return buffers;\n}\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });\n const loadWeights = weightsLoaderFactory(fetchWeights);\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(fetchWeightsFunction) {\n return async (manifest, filePathPrefix = '', weightNames) => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch = {};\n const weightsFound = weightNames != null ? 
weightNames.map(() => false) : [];\n const allManifestWeightNames = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n }\n else {\n enqueueWeightsForFetchingFn();\n }\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(`Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. \\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n const fetchUrls = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n const weightsTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n bufferIndexOffset += numBuffers;\n });\n return weightsTensorMap;\n };\n}\n//# sourceMappingURL=weights_loader.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nimport { loadWeightsAsArrayBuffer } from './weights_loader';\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest {\n constructor(path, loadOptions) {\n this.DEFAULT_METHOD = 'POST';\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n if (loadOptions.fetchFunc != null) {\n assert(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n }\n else {\n this.fetch = env().platform.fetch;\n }\n assert(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n if (Array.isArray(path)) {\n assert(path.length === 2, () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error('requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);\n init.body = new FormData();\n const weightsManifest = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata,\n weightsManifest\n };\n init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');\n if (modelArtifacts.weightData != null) {\n init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');\n }\n const response = await this.fetch(this.path, init);\n if (response.ok) {\n return {\n modelArtifactsInfo: 
getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n }\n else {\n throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load() {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n if (!modelConfigRequest.ok) {\n throw new Error(`Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelConfig;\n try {\n modelConfig = await modelConfigRequest.json();\n }\n catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models has been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your .pb models with the \'pb2json\' ' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n }\n else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n const modelTopology = modelConfig.modelTopology;\n const weightsManifest = modelConfig.weightsManifest;\n const generatedBy = modelConfig.generatedBy;\n const convertedBy = modelConfig.convertedBy;\n const format = modelConfig.format;\n const userDefinedMetadata = modelConfig.userDefinedMetadata;\n // We do not allow both modelTopology and weightsManifest to be missing.\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` +\n `topology nor a manifest for weights.`);\n }\n let weightSpecs;\n let weightData;\n if (weightsManifest != null) {\n const results = await this.loadWeights(weightsManifest);\n [weightSpecs, weightData] = results;\n }\n const artifacts = {\n modelTopology,\n weightSpecs,\n weightData,\n userDefinedMetadata,\n generatedBy,\n convertedBy,\n format\n };\n const initializer = modelConfig.modelInitializer;\n if (initializer) {\n artifacts.modelInitializer = initializer;\n }\n return artifacts;\n }\n async loadWeights(weightsManifest) {\n const weightPath = Array.isArray(this.path) ? 
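/* a two-entry URL path stores the weights URL as its second entry */ 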
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n const weightSpecs = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n const fetchURLs = [];\n const urlPromises = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n }\n else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\nHTTPRequest.URL_SCHEME_REGEX = /^https?:\\/\\//;\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url) {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\nexport function isHTTPScheme(url) {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\nexport const httpRouter = (url, loadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`. If one wants to use it in\n // an environment that is neither the browser nor node, they have to set up\n // a global fetch polyfill.\n return null;\n }\n else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n }\n else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n};\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n/**\n * Creates an IOHandler subtype that sends model artifacts to an HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
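In Node.js, a custom `fetch` implementation can be\n * supplied via the `fetchFunc` load option; a minimal sketch (assuming the\n * node-fetch package is installed):\n *\n * ```js\n * const fetch = require('node-fetch'); // assumption: node-fetch dependency\n * const saveResult = await model.save(\n *     tf.io.http('http://model-server:5000/upload', {fetchFunc: fetch}));\n * ```\n *\n * 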
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the request,\n * this particular server reconstitutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending an\n * HTTP request to the server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path, loadOptions) {\n return new HTTPRequest(path, loadOptions);\n}\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(path, loadOptions) {\n return http(path, loadOptions);\n}\n//# sourceMappingURL=http.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nclass PassthroughLoader {\n constructor(modelArtifacts) {\n this.modelArtifacts = modelArtifacts;\n }\n async load() {\n return this.modelArtifacts;\n }\n}\nclass PassthroughSaver {\n constructor(saveHandler) {\n this.saveHandler = saveHandler;\n }\n async save(modelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts an object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {\n if (arguments.length === 1) {\n const isModelArtifacts = modelArtifacts.modelTopology != null ||\n modelArtifacts.weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts);\n }\n else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({ modelTopology: modelArtifacts });\n }\n }\n else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts,\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... 
do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(saveHandler) {\n return new PassthroughSaver(saveHandler);\n}\n//# sourceMappingURL=passthrough.js.map","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Reshape } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Reshapes a `tf.Tensor` to a given shape.\n *\n * Given an input tensor, returns a new tensor with the same values as the\n * input tensor with shape `shape`.\n *\n * If one component of shape is the special value -1, the size of that\n * dimension is computed so that the total size remains constant. In\n * particular, a shape of [-1] flattens into 1-D. At most one component of\n * shape can be -1.\n *\n * If shape is 1-D or higher, then the operation returns a tensor with shape\n * shape filled with the values of tensor. In this case, the number of\n * elements implied by shape must be the same as the number of elements in\n * tensor.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.reshape([2, 2]).print();\n * ```\n *\n * @param x The input tensor to be reshaped.\n * @param shape An array of integers defining the output tensor shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction reshape_(x, shape) {\n const $x = convertToTensor(x, 'x', 'reshape', null);\n const inputs = { x: $x };\n const attrs = { shape };\n const forward = (backend, save) => {\n shape = util.inferFromImplicitShape(shape, $x.size);\n util.assert($x.size === util.sizeFromShape(shape), () => 'new shape and old shape must have the same number of elements.');\n save([$x]);\n return backend.reshape($x, shape);\n };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Reshape, attrs);\n}\nexport const reshape = op({ reshape_ });\n//# sourceMappingURL=reshape.js.map","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { BatchMatMul } from '../kernel_names';\nimport { makeTypesMatch } from '../tensor_util';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(a, b, transposeA = false, transposeB = false) {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n const forward = (backend, save) => {\n save([$a, $b]);\n const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];\n const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];\n const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];\n const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];\n const outerDimsA = $a.shape.slice(0, -2);\n const outerDimsB = $b.shape.slice(0, -2);\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n const batchDimsCompatible = batchDimA === batchDimB || batchDimA === 1 || batchDimB === 1;\n util.assert($a.rank >= 2 && $b.rank >= 2 && batchDimsCompatible, () => `Error in matMul: the input batch dimensions must either be the ` +\n `same or at least one input batch dimension must be 1. Got input ` +\n `batch dimensions of (${outerDimsA}) and (${outerDimsB}).`);\n util.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n const outShapeOuterDims = batchDimA > batchDimB ? 
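/* broadcast: the output carries the larger of the two batch shapes */ 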
outerDimsA : outerDimsB;\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n const a3D = transposeA ?\n reshape($a, [batchDimA, innerShapeA, outerShapeA]) :\n reshape($a, [batchDimA, outerShapeA, innerShapeA]);\n const b3D = transposeB ?\n reshape($b, [batchDimB, outerShapeB, innerShapeB]) :\n reshape($b, [batchDimB, innerShapeB, outerShapeB]);\n const res3d = backend.batchMatMul(a3D, b3D, transposeA, transposeB);\n return reshape(res3d, outShape);\n };\n const inputs = { a: $a, b: $b };\n const attrs = { transposeA, transposeB };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, BatchMatMul, attrs);\n}\nexport const matMul = op({ matMul_ });\n//# sourceMappingURL=mat_mul.js.map","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { OneHot } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(indices, depth, onValue = 1, offValue = 0) {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n const outShape = [...$indices.shape, depth];\n const forward = (backend, save) => {\n save([$indices]);\n return reshape(backend.oneHot(reshape($indices, [$indices.size]), depth, onValue, offValue), outShape);\n };\n const inputs = { indices: $indices };\n const attrs = { depth, onValue, offValue };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, OneHot, attrs);\n}\nexport const oneHot = op({ oneHot_ });\n//# sourceMappingURL=one_hot.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Transpose } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x, perm) {\n const $x = convertToTensor(x, 'x', 'transpose');\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n if ($x.rank <= 1) {\n return $x.clone();\n }\n const inputs = { x: $x };\n const attrs = { perm };\n return ENGINE.runKernelFunc(backend => backend.transpose($x, perm), inputs, null /* gradient */, Transpose, attrs);\n}\nexport const transpose = op({ transpose_ });\n//# sourceMappingURL=transpose.js.map","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { cast } from './cast';\nimport { matMul } from './mat_mul';\nimport { oneHot } from './one_hot';\nimport { op } from './operation';\nimport { transpose } from './transpose';\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as an int32-type 2D tensor. The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(labels, predictions, numClasses) {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions = convertToTensor(predictions, 'predictions', 'confusionMatrix');\n util.assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. 
` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensor inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses);\n const oneHotPredictions = oneHot(cast($predictions, 'int32'), numClasses);\n const oneHotLabelsT = transpose(oneHotLabels);\n const product = matMul(oneHotLabelsT, oneHotPredictions);\n return cast(product, 'int32');\n}\nexport const confusionMatrix = op({ confusionMatrix_ });\n//# sourceMappingURL=confusion_matrix.js.map","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { assertNonNull } from '../util';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates a rank-3 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor3d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be a nested array of numbers,\n * a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor3d(values, shape, dtype) {\n assertNonNull(values);\n if (shape != null && shape.length !== 3) {\n throw new Error('tensor3d() requires shape to have three numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 3 && inferredShape.length !== 1) {\n throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error('tensor3d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor3d.js.map","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { FromPixels } from '../kernel_names';\nimport { getKernel } from '../kernel_registry';\nimport { Tensor } from '../tensor';\nimport { convertToTensor } from '../tensor_util_env';\nimport { cast } from './cast';\nimport { op } from './operation';\nimport { tensor3d } from './tensor3d';\nlet fromPixels2DContext;\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with the following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(pixels, numChannels = 3) {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() cannot be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n if (pixels.data instanceof Uint8Array) {\n isPixelData = true;\n }\n else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n }\n else if (typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n }\n else if (typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n }\n else if (pixels.getContext != null) {\n isCanvasLike = true;\n }\n else {\n throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in the browser, or OffscreenCanvas, ImageData in a web worker` +\n ` or {data: Uint8Array, width: number, height: number}, ` +\n `but was ${pixels.constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (pixels.readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error('The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the