1. Test Modules
  1. Training Characteristics
    1. Input Learning
      1. Gradient Descent
      2. Conjugate Gradient Descent
      3. Limited-Memory BFGS
    2. Results
  2. Results

Subreport: Logs for com.simiacryptus.ref.lang.ReferenceCountingBase

Test Modules

Using Seed 6023951248040130560

Training Characteristics

Input Learning

In this test, we use the network to learn the target inputs shown below, given their pre-evaluated outputs:

TrainingTester.java:332 executed in 0.01 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(input_target)).flatMap(RefArrays::stream).map(x -> {
      try {
        return x.prettyPrint();
      } finally {
        x.freeRef();
      }
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -0.712 ], [ -0.384 ], [ -1.424 ], [ -0.608 ], [ 1.208 ], [ -0.892 ], [ 0.08 ], [ -1.516 ] ],
    	[ [ 1.356 ], [ 1.612 ], [ -1.54 ], [ 1.524 ], [ 1.704 ], [ 1.652 ], [ 1.552 ], [ -0.852 ] ],
    	[ [ 1.108 ], [ 1.368 ], [ -0.636 ], [ -1.856 ], [ 0.788 ], [ 0.048 ], [ 1.64 ], [ 0.996 ] ],
    	[ [ -1.72 ], [ 1.512 ], [ -1.832 ], [ -1.476 ], [ -1.688 ], [ 0.496 ], [ -0.384 ], [ -0.012 ] ],
    	[ [ 0.092 ], [ -1.572 ], [ 1.556 ], [ -1.228 ], [ 1.764 ], [ -1.028 ], [ 1.912 ], [ -0.876 ] ],
    	[ [ 1.048 ], [ -0.768 ], [ -0.032 ], [ -0.804 ], [ 1.62 ], [ -0.804 ], [ 1.876 ], [ -1.492 ] ],
    	[ [ 0.148 ], [ 0.636 ], [ -0.408 ], [ -1.616 ], [ 0.7 ], [ 0.392 ], [ 0.028 ], [ 1.556 ] ],
    	[ [ 0.048 ], [ -0.176 ], [ -0.128 ], [ 1.032 ], [ -0.068 ], [ -0.556 ], [ 0.3 ], [ -0.464 ] ]
    ]
    [
    	[ [ 1.776 ], [ -0.472 ], [ 0.56 ], [ -0.856 ], [ -1.808 ], [ -1.156 ], [ -1.76 ], [ -1.34 ] ],
    	[ [ 0.52 ], [ 0.184 ], [ 0.012 ], [ -1.1 ], [ -0.124 ], [ -0.968 ], [ -1.58 ], [ -0.808 ] ],
    	[ [ -1.484 ], [ -0.312 ], [ -1.764 ], [ 1.916 ], [ 0.016 ], [ -0.968 ], [ 0.344 ], [ -1.16 ] ],
    	[ [ 1.98 ], [ 1.352 ], [ 0.688 ], [ 1.444 ], [ 1.288 ], [ -1.456 ], [ -1.176 ], [ -1.656 ] ],
    	[ [ 0.812 ], [ 1.42 ], [ -0.892 ], [ -0.628 ], [ 0.82 ], [ 0.66 ], [ -1.16 ], [ -1.724 ] ],
    	[ [ 1.628 ], [ 0.52 ], [ 0.972 ], [ -2.0 ], [ 1.156 ], [ -1.564 ], [ -1.256 ], [ -1.248 ] ],
    	[ [ 0.644 ], [ -0.888 ], [ -0.316 ], [ -0.368 ], [ -1.256 ], [ -0.784 ], [ -0.504 ], [ 1.628 ] ],
    	[ [ -1.552 ], [ 1.24 ], [ 1.324 ], [ -1.664 ], [ 0.692 ], [ 0.82 ], [ 1.956 ], [ -1.116 ] ]
    ]
    [
    	[ [ 1.524 ], [ 1.048 ], [ 0.092 ], [ -1.616 ], [ 1.876 ], [ -1.516 ], [ 0.636 ], [ -0.804 ] ],
    	[ [ 1.356 ], [ -1.688 ], [ -0.384 ], [ -0.068 ], [ -1.028 ], [ -1.832 ], [ 1.556 ], [ -1.228 ] ],
    	[ [ 1.368 ], [ -0.876 ], [ 1.764 ], [ -0.032 ], [ 0.788 ], [ 0.148 ], [ -0.636 ], [ -0.892 ] ],
    	[ [ 1.108 ], [ 1.512 ], [ 1.64 ], [ -0.768 ], [ -0.804 ], [ -1.54 ], [ 1.032 ], [ 0.7 ] ],
    	[ [ 1.912 ], [ -0.012 ], [ -0.408 ], [ 1.556 ], [ -0.852 ], [ -0.556 ], [ -1.572 ], [ -0.176 ] ],
    	[ [ 1.208 ], [ 0.3 ], [ -0.712 ], [ -1.476 ], [ -0.128 ], [ 0.496 ], [ 1.62 ], [ 1.704 ] ],
    	[ [ 0.08 ], [ -0.384 ], [ 0.048 ], [ 1.652 ], [ 1.552 ], [ 0.028 ], [ -0.464 ], [ 0.996 ] ],
    	[ [ -1.492 ], [ 0.048 ], [ 1.612 ], [ 0.392 ], [ -1.856 ], [ -1.72 ], [ -1.424 ], [ -0.608 ] ]
    ]
    [
    	[ [ -1.58 ], [ 1.444 ], [ -1.176 ], [ 0.82 ], [ 1.98 ], [ -1.724 ], [ -0.504 ], [ -1.484 ] ],
    	[ [ 1.24 ], [ -0.892 ], [ 1.288 ], [ -0.312 ], [ 0.66 ], [ -1.34 ], [ -0.888 ], [ -1.16 ] ],
    	[ [ 0.184 ], [ -0.316 ], [ 1.42 ], [ -1.76 ], [ 0.56 ], [ 0.016 ], [ -0.124 ], [ -1.1 ] ],
    	[ [ -0.368 ], [ -1.256 ], [ -1.156 ], [ 0.688 ], [ -1.764 ], [ 1.156 ], [ 1.628 ], [ -1.656 ] ],
    	[ [ 0.692 ], [ 0.644 ], [ 0.012 ], [ -1.456 ], [ -0.472 ], [ -1.116 ], [ -0.856 ], [ -0.968 ] ],
    	[ [ 0.82 ], [ 1.352 ], [ 0.52 ], [ -0.968 ], [ 0.812 ], [ -0.784 ], [ -1.256 ], [ 1.628 ] ],
    	[ [ -1.564 ], [ -2.0 ], [ -1.664 ], [ 0.344 ], [ 0.52 ], [ -0.628 ], [ -1.16 ], [ 1.776 ] ],
    	[ [ -1.552 ], [ 1.956 ], [ 1.916 ], [ -1.248 ], [ -0.808 ], [ 1.324 ], [ 0.972 ], [ -1.808 ] ]
    ]
    [
    	[ [ 1.356 ], [ 0.148 ], [ 0.636 ], [ 1.512 ], [ -1.424 ], [ 1.64 ], [ -0.384 ], [ 1.764 ] ],
    	[ [ 1.612 ], [ -0.556 ], [ -0.408 ], [ 1.552 ], [ 0.3 ], [ 0.7 ], [ 0.788 ], [ 1.556 ] ],
    	[ [ -1.572 ], [ -1.54 ], [ 1.912 ], [ 1.876 ], [ -1.856 ], [ 0.392 ], [ -0.636 ], [ 1.032 ] ],
    	[ [ -0.712 ], [ -0.804 ], [ -0.852 ], [ -0.128 ], [ -1.228 ], [ -1.516 ], [ 0.048 ], [ 0.08 ] ],
    	[ [ -0.176 ], [ -0.608 ], [ 1.368 ], [ -0.768 ], [ -0.892 ], [ -1.688 ], [ -0.804 ], [ 1.652 ] ],
    	[ [ -0.012 ], [ -1.492 ], [ -0.068 ], [ 1.208 ], [ -0.876 ], [ 1.62 ], [ 0.996 ], [ 1.704 ] ],
    	[ [ 1.048 ], [ 0.028 ], [ 0.496 ], [ 1.556 ], [ 0.092 ], [ -0.464 ], [ 1.524 ], [ 0.048 ] ],
    	[ [ -1.476 ], [ 1.108 ], [ -0.384 ], [ -1.028 ], [ -1.832 ], [ -0.032 ], [ -1.616 ], [ -1.72 ] ]
    ]
    [
    	[ [ -1.456 ], [ 0.82 ], [ 1.628 ], [ 1.24 ], [ -0.892 ], [ -1.552 ], [ 0.688 ], [ -1.256 ] ],
    	[ [ 0.52 ], [ -0.504 ], [ -0.888 ], [ -0.968 ], [ 0.56 ], [ -1.656 ], [ -1.16 ], [ 1.628 ] ],
    	[ [ 0.52 ], [ -0.628 ], [ -0.316 ], [ -1.664 ], [ -1.76 ], [ -1.16 ], [ -1.764 ], [ -1.808 ] ],
    	[ [ 1.42 ], [ -0.784 ], [ -0.312 ], [ -1.248 ], [ 0.184 ], [ 1.444 ], [ -1.564 ], [ -2.0 ] ],
    	[ [ 1.288 ], [ 1.324 ], [ 0.812 ], [ -1.484 ], [ 1.98 ], [ 0.012 ], [ -0.968 ], [ 0.82 ] ],
    	[ [ -0.856 ], [ -1.156 ], [ 1.352 ], [ -1.34 ], [ 1.156 ], [ -1.256 ], [ -1.116 ], [ -0.472 ] ],
    	[ [ 1.776 ], [ 0.016 ], [ 0.644 ], [ -1.176 ], [ 0.972 ], [ -1.724 ], [ -0.368 ], [ -0.124 ] ],
    	[ [ 1.916 ], [ -1.58 ], [ 0.66 ], [ 1.956 ], [ 0.692 ], [ 0.344 ], [ -0.808 ], [ -1.1 ] ]
    ]
    [
    	[ [ -0.556 ], [ 1.876 ], [ -0.464 ], [ -0.408 ], [ -0.712 ], [ 1.912 ], [ -0.012 ], [ -0.384 ] ],
    	[ [ -1.516 ], [ -0.608 ], [ -0.068 ], [ 1.108 ], [ 1.524 ], [ -0.892 ], [ 0.496 ], [ -1.832 ] ],
    	[ [ 1.764 ], [ -1.424 ], [ -0.032 ], [ -1.572 ], [ 1.64 ], [ 1.208 ], [ -1.54 ], [ 1.556 ] ],
    	[ [ -0.804 ], [ 1.652 ], [ -0.768 ], [ 0.392 ], [ -1.228 ], [ 1.556 ], [ -0.384 ], [ -0.636 ] ],
    	[ [ 1.552 ], [ -0.852 ], [ -1.856 ], [ 1.368 ], [ -1.028 ], [ -1.492 ], [ 1.62 ], [ 0.636 ] ],
    	[ [ 0.048 ], [ 1.356 ], [ 0.028 ], [ 1.704 ], [ 1.048 ], [ -0.804 ], [ -0.876 ], [ 0.048 ] ],
    	[ [ 0.092 ], [ 0.148 ], [ -1.72 ], [ -1.476 ], [ -0.176 ], [ -1.616 ], [ 1.612 ], [ -0.128 ] ],
    	[ [ 0.3 ], [ 0.996 ], [ 0.08 ], [ 1.032 ], [ 1.512 ], [ 0.788 ], [ 0.7 ], [ -1.688 ] ]
    ]
    [
    	[ [ -0.316 ], [ -1.484 ], [ -1.552 ], [ -1.456 ], [ 0.56 ], [ -0.312 ], [ -1.256 ], [ -1.76 ] ],
    	[ [ -0.368 ], [ -1.764 ], [ 1.628 ], [ 0.82 ], [ -0.504 ], [ -1.16 ], [ 0.644 ], [ -0.784 ] ],
    	[ [ 0.016 ], [ -1.16 ], [ -0.628 ], [ -0.472 ], [ 0.82 ], [ 1.916 ], [ -1.34 ], [ 1.444 ] ],
    	[ [ 1.628 ], [ 1.24 ], [ 0.52 ], [ -1.1 ], [ -0.968 ], [ 0.972 ], [ 0.812 ], [ -1.116 ] ],
    	[ [ -1.656 ], [ -0.808 ], [ 0.012 ], [ -1.564 ], [ -1.248 ], [ -0.892 ], [ -1.58 ], [ 1.956 ] ],
    	[ [ 0.184 ], [ -0.856 ], [ 1.352 ], [ -0.968 ], [ -1.176 ], [ 0.692 ], [ -1.156 ], [ -0.124 ] ],
    	[ [ 1.42 ], [ 0.688 ], [ -1.808 ], [ 1.156 ], [ 0.344 ], [ -0.888 ], [ 1.288 ], [ 0.52 ] ],
    	[ [ -1.724 ], [ -1.664 ], [ -2.0 ], [ 1.98 ], [ 1.776 ], [ 0.66 ], [ -1.256 ], [ 1.324 ] ]
    ]
    [
    	[ [ 0.496 ], [ 1.032 ], [ -0.768 ], [ -0.128 ], [ 0.392 ], [ -0.892 ], [ 1.368 ], [ 1.912 ] ],
    	[ [ 1.512 ], [ 0.3 ], [ -1.516 ], [ 1.556 ], [ 0.08 ], [ 1.704 ], [ -0.384 ], [ -0.068 ] ],
    	[ [ 1.048 ], [ 1.208 ], [ 1.552 ], [ 1.524 ], [ -1.72 ], [ 1.62 ], [ 0.028 ], [ -1.616 ] ],
    	[ [ -1.832 ], [ -0.712 ], [ 0.048 ], [ 1.764 ], [ -1.54 ], [ 0.148 ], [ -1.856 ], [ -0.012 ] ],
    	[ [ 0.996 ], [ 0.092 ], [ 0.788 ], [ 1.64 ], [ -0.176 ], [ -0.852 ], [ -0.556 ], [ 1.612 ] ],
    	[ [ 0.636 ], [ -0.608 ], [ -1.572 ], [ -0.408 ], [ -0.384 ], [ -0.636 ], [ 1.108 ], [ 1.556 ] ],
    	[ [ 1.356 ], [ 0.7 ], [ 0.048 ], [ -1.476 ], [ 1.652 ], [ -0.464 ], [ -1.424 ], [ -1.688 ] ],
    	[ [ -0.804 ], [ -1.028 ], [ -0.804 ], [ -1.492 ], [ -0.032 ], [ -1.228 ], [ -0.876 ], [ 1.876 ] ]
    ]
    [
    	[ [ 0.344 ], [ 0.012 ], [ -1.664 ], [ 0.972 ], [ 1.288 ], [ 1.916 ], [ -0.316 ], [ -0.856 ] ],
    	[ [ 0.66 ], [ -1.248 ], [ -1.58 ], [ -1.16 ], [ -2.0 ], [ 1.42 ], [ 1.956 ], [ 1.444 ] ],
    	[ [ -1.764 ], [ 1.156 ], [ 0.184 ], [ -1.76 ], [ -0.368 ], [ -1.16 ], [ -1.484 ], [ -0.892 ] ],
    	[ [ -1.176 ], [ 0.82 ], [ -0.472 ], [ 0.016 ], [ 1.628 ], [ 1.324 ], [ -1.1 ], [ -0.968 ] ],
    	[ [ 1.352 ], [ -1.256 ], [ -1.564 ], [ 0.56 ], [ -0.888 ], [ -0.312 ], [ 0.688 ], [ 1.98 ] ],
    	[ [ 0.52 ], [ -0.968 ], [ -1.656 ], [ -1.808 ], [ -1.256 ], [ 1.776 ], [ -0.784 ], [ 0.692 ] ],
    	[ [ -1.34 ], [ -1.456 ], [ 0.82 ], [ 1.24 ], [ -0.628 ], [ -1.552 ], [ -0.808 ], [ -1.116 ] ],
    	[ [ -0.504 ], [ 1.628 ], [ -1.724 ], [ -1.156 ], [ -0.124 ], [ 0.644 ], [ 0.52 ], [ 0.812 ] ]
    ]
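
Before looking at the individual optimizers, the following is a minimal, self-contained sketch of the input-learning idea: the function under test (here a mean-square loss against a fixed reference, standing in for the frozen network) is left untouched, and plain gradient descent adjusts the input until it reproduces a pre-evaluated target output. All names are illustrative plain Java, not the MindsEye API.

    // Illustrative sketch only (not MindsEye code): recover an input whose
    // output under a fixed function matches a pre-evaluated target output.
    public class InputLearningSketch {
      public static void main(String[] args) {
        double[] reference = { 0.5, -1.0, 1.5 };               // fixed second operand of the loss
        double[] targetInput = { 1.0, 0.0, -0.5 };             // the input we pretend to have lost
        double targetOutput = meanSq(targetInput, reference);  // its pre-evaluated output
        double[] x = new double[reference.length];             // trainable input, starts at zero
        double lr = 0.1;
        for (int iter = 0; iter < 500; iter++) {
          double residual = meanSq(x, reference) - targetOutput;  // scalar loss is residual^2
          // d(residual^2)/dx_i = 2 * residual * 2 * (x_i - reference_i) / n
          for (int i = 0; i < x.length; i++) {
            x[i] -= lr * 2 * residual * 2 * (x[i] - reference[i]) / x.length;
          }
        }
        System.out.println("output " + meanSq(x, reference) + " vs target " + targetOutput);
      }

      static double meanSq(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++)
          sum += (a[i] - b[i]) * (a[i] - b[i]);
        return sum / a.length;
      }
    }

Note that only the output is matched, so the recovered input need not equal the original target input; any input producing the same output is an equally valid solution.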

Gradient Descent

First, we train using the basic gradient descent method with weak line search conditions.
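
As a rough illustration of these weak line search conditions, the sketch below checks the textbook Armijo (sufficient decrease) and weak Wolfe (curvature) conditions for a few candidate step sizes on a one-dimensional objective. The constants c1 and c2 and the objective are conventional illustrative choices, not the internals of the library's ArmijoWolfeSearch.

    // Illustrative sketch only: Armijo and weak Wolfe tests for a step size t
    // along the steepest-descent direction of f(x) = (x - 3)^2.
    public class WolfeSketch {
      static double f(double x)  { return (x - 3) * (x - 3); }
      static double df(double x) { return 2 * (x - 3); }

      public static void main(String[] args) {
        double x = 0.0;
        double dir = -df(x);                 // descent direction; slope df(x)*dir = -36
        double c1 = 1e-4, c2 = 0.9;          // conventional constants
        for (double t : new double[] { 0.01, 0.5, 1.5 }) {
          boolean armijo = f(x + t * dir) <= f(x) + c1 * t * df(x) * dir;  // sufficient decrease
          boolean wolfe = df(x + t * dir) * dir >= c2 * df(x) * dir;       // enough curvature
          System.out.printf("t=%.2f armijo=%b wolfe=%b%n", t, armijo, wolfe);
        }
      }
    }

Here t = 0.01 passes Armijo but fails the curvature test (the step is too timid), t = 0.5 passes both, and t = 1.5 overshoots and fails Armijo.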

TrainingTester.java:480 executed in 0.05 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 937151519995
BACKPROP_AGG_SIZE = 3
THREADS = 64
SINGLE_THREADED = false
Initialized CoreSettings = {
"backpropAggregationSize" : 3,
"jvmThreads" : 64,
"singleThreaded" : false
}
Final threshold in iteration 0: 0.0 (> 0.0) after 0.045s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And the regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -1.856 ], [ 0.392 ], [ -1.832 ], [ 0.636 ], [ 0.092 ], [ 1.652 ], [ -0.556 ], [ 1.108 ] ],
    	[ [ 1.764 ], [ -1.688 ], [ 0.996 ], [ 0.028 ], [ -0.128 ], [ 0.048 ], [ 1.552 ], [ -0.892 ] ],
    	[ [ -1.228 ], [ -1.572 ], [ 1.032 ], [ -1.028 ], [ -0.804 ], [ 1.512 ], [ 0.496 ], [ -0.176 ] ],
    	[ [ -0.384 ], [ -1.476 ], [ -0.068 ], [ 1.048 ], [ 1.556 ], [ 0.08 ], [ 1.62 ], [ 0.7 ] ],
    	[ [ -0.012 ], [ -0.804 ], [ 0.3 ], [ -0.852 ], [ -0.032 ], [ 1.704 ], [ -0.608 ], [ -1.616 ] ],
    	[ [ -1.492 ], [ -0.464 ], [ 0.788 ], [ -0.408 ], [ 1.912 ], [ -1.424 ], [ -0.768 ], [ -0.712 ] ],
    	[ [ -1.516 ], [ -1.54 ], [ -0.636 ], [ 0.048 ], [ 1.612 ], [ 1.556 ], [ 0.148 ], [ 1.356 ] ],
    	[ [ 1.208 ], [ -0.384 ], [ 1.368 ], [ 1.876 ], [ 1.524 ], [ -0.876 ], [ -1.72 ], [ 1.64 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [ 2.8158015 ]

Conjugate Gradient Descent

Next, we train using a conjugate gradient descent method, which is exact on quadratic objectives, minimizing an n-dimensional quadratic in at most n steps.
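
For reference, the sketch below runs the classic linear conjugate gradient iteration on a two-dimensional quadratic f(x) = 0.5 x^T A x - b^T x; for an n x n symmetric positive-definite A it reaches the exact minimizer in at most n steps. This is illustrative plain Java, not the library's implementation.

    // Illustrative sketch only: linear conjugate gradient solving A x = b,
    // i.e. minimizing f(x) = 0.5 x^T A x - b^T x, exactly in n = 2 steps.
    public class ConjugateGradientSketch {
      public static void main(String[] args) {
        double[][] A = { { 4, 1 }, { 1, 3 } };  // symmetric positive-definite
        double[] b = { 1, 2 };
        double[] x = { 0, 0 };
        double[] r = b.clone();                 // residual r = b - A x (x starts at zero)
        double[] p = r.clone();                 // first search direction
        double rs = dot(r, r);
        for (int k = 0; k < b.length; k++) {
          double[] Ap = mul(A, p);
          double alpha = rs / dot(p, Ap);       // exact line search on the quadratic
          for (int i = 0; i < x.length; i++) {
            x[i] += alpha * p[i];
            r[i] -= alpha * Ap[i];
          }
          double rsNew = dot(r, r);
          for (int i = 0; i < p.length; i++)
            p[i] = r[i] + (rsNew / rs) * p[i];  // new A-conjugate direction
          rs = rsNew;
        }
        System.out.println(x[0] + ", " + x[1]); // prints ~0.0909, 0.6364, i.e. A^-1 b
      }

      static double dot(double[] a, double[] b) {
        double s = 0;
        for (int i = 0; i < a.length; i++) s += a[i] * b[i];
        return s;
      }

      static double[] mul(double[][] m, double[] v) {
        double[] out = new double[v.length];
        for (int i = 0; i < m.length; i++) out[i] = dot(m[i], v);
        return out;
      }
    }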

TrainingTester.java:452 executed in 0.01 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new QuadraticSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 937214357728
Final threshold in iteration 0: 0.0 (> 0.0) after 0.010s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And the regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -1.856 ], [ 0.392 ], [ -1.832 ], [ 0.636 ], [ 0.092 ], [ 1.652 ], [ -0.556 ], [ 1.108 ] ],
    	[ [ 1.764 ], [ -1.688 ], [ 0.996 ], [ 0.028 ], [ -0.128 ], [ 0.048 ], [ 1.552 ], [ -0.892 ] ],
    	[ [ -1.228 ], [ -1.572 ], [ 1.032 ], [ -1.028 ], [ -0.804 ], [ 1.512 ], [ 0.496 ], [ -0.176 ] ],
    	[ [ -0.384 ], [ -1.476 ], [ -0.068 ], [ 1.048 ], [ 1.556 ], [ 0.08 ], [ 1.62 ], [ 0.7 ] ],
    	[ [ -0.012 ], [ -0.804 ], [ 0.3 ], [ -0.852 ], [ -0.032 ], [ 1.704 ], [ -0.608 ], [ -1.616 ] ],
    	[ [ -1.492 ], [ -0.464 ], [ 0.788 ], [ -0.408 ], [ 1.912 ], [ -1.424 ], [ -0.768 ], [ -0.712 ] ],
    	[ [ -1.516 ], [ -1.54 ], [ -0.636 ], [ 0.048 ], [ 1.612 ], [ 1.556 ], [ 0.148 ], [ 1.356 ] ],
    	[ [ 1.208 ], [ -0.384 ], [ 1.368 ], [ 1.876 ], [ 1.524 ], [ -0.876 ], [ -1.72 ], [ 1.64 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [ 2.8158015 ]

Limited-Memory BFGS

Finally, we run the same optimization using L-BFGS, which approximates Newton's method and is nearly ideal for smooth, approximately quadratic objectives.
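
For reference, the sketch below shows the standard L-BFGS two-loop recursion that turns the current gradient into a quasi-Newton search direction from a short history of curvature pairs (s_k = x_{k+1} - x_k, y_k = g_{k+1} - g_k). It is an illustrative, plain-Java rendering of the textbook algorithm, not the library's LBFGS class.

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.Iterator;

    // Illustrative sketch only: the L-BFGS two-loop recursion. History pairs
    // are appended with addLast, so normal iteration order is oldest -> newest.
    public class LbfgsSketch {
      // One curvature pair: s = step in x, y = change in gradient.
      static class Pair {
        final double[] s, y;
        final double rho;
        Pair(double[] s, double[] y) { this.s = s; this.y = y; this.rho = 1.0 / dot(y, s); }
      }

      // Returns the search direction -H g, where H is the implicit L-BFGS
      // inverse-Hessian estimate built from the stored curvature pairs.
      static double[] direction(double[] g, Deque<Pair> history) {
        double[] q = g.clone();
        double[] alpha = new double[history.size()];
        int i = 0;
        for (Iterator<Pair> it = history.descendingIterator(); it.hasNext(); i++) { // newest -> oldest
          Pair p = it.next();
          alpha[i] = p.rho * dot(p.s, q);
          axpy(-alpha[i], p.y, q);
        }
        if (!history.isEmpty()) {                      // initial scaling gamma = (s.y)/(y.y)
          Pair newest = history.peekLast();
          scale(dot(newest.s, newest.y) / dot(newest.y, newest.y), q);
        }
        i--;
        for (Pair p : history) {                       // oldest -> newest
          double beta = p.rho * dot(p.y, q);
          axpy(alpha[i--] - beta, p.s, q);
        }
        scale(-1.0, q);                                // negate for a descent direction
        return q;
      }

      public static void main(String[] args) {
        // With an empty history the recursion degenerates to plain -g.
        double[] d = direction(new double[] { 2.0, -4.0 }, new ArrayDeque<>());
        System.out.println(d[0] + ", " + d[1]);        // prints -2.0, 4.0
      }

      static double dot(double[] a, double[] b) {
        double s = 0;
        for (int k = 0; k < a.length; k++) s += a[k] * b[k];
        return s;
      }

      static void axpy(double a, double[] x, double[] y) {
        for (int k = 0; k < y.length; k++) y[k] += a * x[k];
      }

      static void scale(double a, double[] x) {
        for (int k = 0; k < x.length; k++) x[k] *= a;
      }
    }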

TrainingTester.java:509 executed in 0.02 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new LBFGS());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setIterationsPerSample(100);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 937237129153
Final threshold in iteration 0: 0.0 (> 0.0) after 0.015s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And the regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -1.856 ], [ 0.392 ], [ -1.832 ], [ 0.636 ], [ 0.092 ], [ 1.652 ], [ -0.556 ], [ 1.108 ] ],
    	[ [ 1.764 ], [ -1.688 ], [ 0.996 ], [ 0.028 ], [ -0.128 ], [ 0.048 ], [ 1.552 ], [ -0.892 ] ],
    	[ [ -1.228 ], [ -1.572 ], [ 1.032 ], [ -1.028 ], [ -0.804 ], [ 1.512 ], [ 0.496 ], [ -0.176 ] ],
    	[ [ -0.384 ], [ -1.476 ], [ -0.068 ], [ 1.048 ], [ 1.556 ], [ 0.08 ], [ 1.62 ], [ 0.7 ] ],
    	[ [ -0.012 ], [ -0.804 ], [ 0.3 ], [ -0.852 ], [ -0.032 ], [ 1.704 ], [ -0.608 ], [ -1.616 ] ],
    	[ [ -1.492 ], [ -0.464 ], [ 0.788 ], [ -0.408 ], [ 1.912 ], [ -1.424 ], [ -0.768 ], [ -0.712 ] ],
    	[ [ -1.516 ], [ -1.54 ], [ -0.636 ], [ 0.048 ], [ 1.612 ], [ 1.556 ], [ 0.148 ], [ 1.356 ] ],
    	[ [ 1.208 ], [ -0.384 ], [ 1.368 ], [ 1.876 ], [ 1.524 ], [ -0.876 ], [ -1.72 ], [ 1.64 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [ 2.8158015 ]

TrainingTester.java:432 executed in 0.01 seconds (0.000 gc):

    return TestUtil.compare(title + " vs Iteration", runs);
Logging
No Data

TrainingTester.java:435 executed in 0.00 seconds (0.000 gc):

    return TestUtil.compareTime(title + " vs Time", runs);
Logging
No Data

Results

TrainingTester.java:255 executed in 0.03 seconds (0.000 gc):

    return grid(inputLearning, modelLearning, completeLearning);

Returns

Result

TrainingTester.java:258 executed in 0.00 seconds (0.000 gc):

    return new ComponentResult(null == inputLearning ? null : inputLearning.value,
        null == modelLearning ? null : modelLearning.value, null == completeLearning ? null : completeLearning.value);

Returns

    {"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}

LayerTests.java:425 executed in 0.00 seconds (0.000 gc):

    throwException(exceptions.addRef());

Results

details: {"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}
result: OK
  {
    "result": "OK",
    "performance": {
      "execution_time": "0.450",
      "gc_time": "0.155"
    },
    "created_on": 1586735524948,
    "file_name": "trainingTest",
    "report": {
      "simpleName": "Basic",
      "canonicalName": "com.simiacryptus.mindseye.layers.java.MeanSqLossLayerTest.Basic",
      "link": "https://github.com/SimiaCryptus/mindseye-java/tree/93db34cedee48c0202777a2b25deddf1dfaf5731/src/test/java/com/simiacryptus/mindseye/layers/java/MeanSqLossLayerTest.java",
      "javaDoc": ""
    },
    "training_analysis": {
      "input": {
        "LBFGS": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "CjGD": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "GD": {
          "type": "NonConverged",
          "value": "NaN"
        }
      }
    },
    "archive": "s3://code.simiacrypt.us/tests/com/simiacryptus/mindseye/layers/java/MeanSqLossLayer/Basic/trainingTest/202004125204",
    "id": "e3067a5d-f343-4afd-af71-9955c405750e",
    "report_type": "Components",
    "display_name": "Comparative Training",
    "target": {
      "simpleName": "MeanSqLossLayer",
      "canonicalName": "com.simiacryptus.mindseye.layers.java.MeanSqLossLayer",
      "link": "https://github.com/SimiaCryptus/mindseye-java/tree/93db34cedee48c0202777a2b25deddf1dfaf5731/src/main/java/com/simiacryptus/mindseye/layers/java/MeanSqLossLayer.java",
      "javaDoc": ""
    }
  }