1. Test Modules
  2. Training Characteristics
    1. Input Learning
      1. Gradient Descent
      2. Conjugate Gradient Descent
      3. Limited-Memory BFGS
    2. Results
  3. Results

Subreport: Logs for com.simiacryptus.ref.lang.ReferenceCountingBase

Test Modules

Using Seed 8422966376684805120

Training Characteristics

Input Learning

In this test, we use a network to learn this target input, given its pre-evaluated output:

TrainingTester.java:332 executed in 0.01 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(input_target)).flatMap(RefArrays::stream).map(x -> {
      try {
        return x.prettyPrint();
      } finally {
        x.freeRef();
      }
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ 0.092, 0.392 ], [ -0.892, 0.016 ], [ 1.556, -1.248 ], [ -0.808, -2.0 ], [ -1.16, 0.688 ], [ -0.784, -0.556 ], [ 1.64, 0.996 ], [ 0.496, -1.028 ] ],
    	[ [ 0.52, -0.768 ], [ 1.368, 1.956 ], [ 1.208, 0.028 ], [ -1.664, -0.504 ], [ -1.764, 1.776 ], [ 1.352, 0.184 ], [ 1.42, -0.888 ], [ -0.608, 1.628 ] ],
    	[ [ -0.176, 0.048 ], [ 1.512, 1.556 ], [ -1.156, -1.72 ], [ -0.472, -1.256 ], [ 1.912, -1.456 ], [ 0.972, 1.524 ], [ -1.54, 0.7 ], [ 1.612, -0.968 ] ],
    	[ [ 0.148, -1.572 ], [ -0.128, -0.852 ], [ 0.82, 0.788 ], [ 1.98, 0.692 ], [ -0.316, -0.032 ], [ -1.116, 0.66 ], [ -0.712, 0.636 ], [ 1.032, 1.356 ] ],
    	[ [ 1.876, -0.012 ], [ -1.564, -0.124 ], [ -1.228, 1.704 ], [ 1.628, -1.76 ], [ -1.58, 0.52 ], [ -1.516, -1.808 ], [ -0.804, -0.368 ], [ -1.688, -0.876 ] ],
    	[ [ 0.08, -0.856 ], [ 0.812, -1.176 ], [ 0.3, -0.968 ], [ 1.324, 0.344 ], [ 1.288, -1.552 ], [ -1.34, -1.616 ], [ -0.464, -0.384 ], [ 1.048, 0.644 ] ],
    	[ [ 0.048, -1.476 ], [ 0.82, 1.444 ], [ -1.856, -1.656 ], [ -0.068, -1.424 ], [ 1.916, -0.636 ], [ 0.012, -1.1 ], [ 1.764, -1.16 ], [ -1.724, -1.484 ] ],
    	[ [ -1.492, 1.156 ], [ 0.56, -0.892 ], [ -0.384, -1.256 ], [ 1.62, 1.24 ], [ -0.804, -0.408 ], [ 1.108, 1.652 ], [ -0.312, -1.832 ], [ -0.628, 1.552 ] ]
    ]
    [
    	[ [ -1.116, -1.832 ], [ -1.724, -1.484 ], [ -1.516, 1.876 ], [ 0.7, -0.968 ], [ 1.324, -0.892 ], [ 1.956, 1.912 ], [ -0.472, 0.028 ], [ 1.208, 1.032 ] ],
    	[ [ -0.876, 0.344 ], [ -1.856, 0.812 ], [ 1.108, -0.408 ], [ 1.556, 1.776 ], [ 0.048, -0.804 ], [ 0.644, -1.808 ], [ 0.3, -1.616 ], [ 0.692, -1.028 ] ],
    	[ [ -1.176, -1.248 ], [ 1.048, 1.916 ], [ 1.156, -1.764 ], [ -0.504, 0.012 ], [ -0.316, -1.552 ], [ -0.032, -0.312 ], [ -1.256, -0.784 ], [ -0.608, 1.652 ] ],
    	[ [ -0.968, 1.356 ], [ 0.092, -0.628 ], [ 1.512, -1.656 ], [ 0.972, 0.52 ], [ 0.016, -1.688 ], [ -0.636, 0.56 ], [ -0.128, -0.768 ], [ 1.704, -1.58 ] ],
    	[ [ 0.496, -0.012 ], [ -2.0, -0.176 ], [ 0.82, -1.424 ], [ 0.636, -0.384 ], [ 1.64, -1.1 ], [ -0.888, -1.34 ], [ -0.808, -0.368 ], [ 1.24, 0.048 ] ],
    	[ [ -1.156, -0.068 ], [ 0.52, -1.72 ], [ -1.492, -0.856 ], [ 0.996, -1.476 ], [ 1.352, -1.228 ], [ 1.612, -1.572 ], [ 0.08, 0.788 ], [ -0.464, 1.628 ] ],
    	[ [ -0.712, -0.384 ], [ -0.804, -1.664 ], [ 1.556, -1.256 ], [ 1.288, 0.392 ], [ 1.368, 1.628 ], [ -1.16, -0.124 ], [ -0.556, 0.148 ], [ 1.524, 1.764 ] ],
    	[ [ 0.82, -0.852 ], [ -1.76, 1.42 ], [ -1.16, 1.552 ], [ 0.184, -1.456 ], [ -1.564, -0.892 ], [ -1.54, 1.62 ], [ 1.444, 0.688 ], [ 1.98, 0.66 ] ]
    ]
    [
    	[ [ -1.572, 0.996 ], [ 1.24, -0.032 ], [ -1.16, -1.516 ], [ -1.54, 1.916 ], [ 1.324, -1.832 ], [ -0.384, 1.156 ], [ -1.028, 1.552 ], [ 1.524, 1.64 ] ],
    	[ [ -1.256, -0.368 ], [ 0.812, 0.82 ], [ -0.464, -1.34 ], [ -1.58, -1.616 ], [ 1.628, 1.556 ], [ -1.16, -0.608 ], [ -0.856, -0.892 ], [ 1.512, -1.688 ] ],
    	[ [ 0.344, -0.804 ], [ -0.128, 0.496 ], [ 1.776, 1.956 ], [ 0.048, 0.016 ], [ -1.456, -0.968 ], [ -1.116, 1.556 ], [ 1.108, 0.972 ], [ -1.656, -1.156 ] ],
    	[ [ 1.652, 0.028 ], [ 1.628, -0.636 ], [ -0.504, 0.52 ], [ 0.7, 0.788 ], [ 1.42, -0.888 ], [ -0.768, 1.288 ], [ -1.76, -1.424 ], [ -0.876, -0.124 ] ],
    	[ [ 0.184, -1.256 ], [ -0.012, -0.712 ], [ 0.56, -1.248 ], [ -0.176, -1.492 ], [ 0.392, -1.176 ], [ 1.876, -0.628 ], [ -0.892, 1.704 ], [ -1.856, -1.664 ] ],
    	[ [ -0.068, -1.564 ], [ 0.636, 0.66 ], [ 1.368, 1.032 ], [ 0.688, 0.644 ], [ 0.3, 1.912 ], [ -0.472, 1.048 ], [ 1.444, 0.048 ], [ 1.208, -0.784 ] ],
    	[ [ -0.804, -0.312 ], [ 0.148, 0.52 ], [ -1.476, -1.72 ], [ 1.98, -1.484 ], [ 0.012, 0.82 ], [ -0.852, -1.228 ], [ -1.764, -0.808 ], [ -2.0, 1.62 ] ],
    	[ [ -0.408, -1.1 ], [ -0.556, 0.692 ], [ -0.384, 0.092 ], [ 1.352, 1.764 ], [ -0.316, -1.724 ], [ 0.08, 1.612 ], [ -1.808, -1.552 ], [ -0.968, 1.356 ] ]
    ]
    [
    	[ [ -1.476, -1.1 ], [ 1.368, 1.764 ], [ -1.34, 0.08 ], [ -1.808, 0.392 ], [ -1.156, -0.312 ], [ -1.116, 1.208 ], [ -1.54, 1.324 ], [ -0.128, -1.76 ] ],
    	[ [ -0.504, -0.068 ], [ -2.0, -0.852 ], [ 0.048, -1.856 ], [ -0.472, -1.228 ], [ 0.016, -0.384 ], [ 0.82, 0.52 ], [ -0.032, 0.3 ], [ 0.692, 1.24 ] ],
    	[ [ -0.636, -0.856 ], [ 0.496, 1.048 ], [ -1.656, 0.688 ], [ -1.552, 1.628 ], [ 0.52, -1.58 ], [ -0.804, 0.028 ], [ 1.956, -1.492 ], [ 0.344, 0.788 ] ],
    	[ [ -1.616, -0.316 ], [ -0.768, -1.72 ], [ -1.16, 0.56 ], [ 1.556, 1.62 ], [ 1.352, 1.42 ], [ 1.64, -0.784 ], [ 1.356, -0.804 ], [ -0.608, 0.148 ] ],
    	[ [ 0.66, -1.256 ], [ 0.636, 1.288 ], [ -0.888, 1.776 ], [ -1.424, -1.564 ], [ 0.812, 1.556 ], [ -0.384, 1.652 ], [ 0.012, -0.892 ], [ 0.996, -0.368 ] ],
    	[ [ 1.512, 0.092 ], [ -0.556, -1.16 ], [ -1.456, 0.972 ], [ 1.912, -1.724 ], [ -1.688, 1.444 ], [ -0.408, -1.572 ], [ 1.108, 1.876 ], [ 1.612, -0.892 ] ],
    	[ [ -1.248, 1.156 ], [ -0.124, -1.484 ], [ -0.012, 0.7 ], [ 1.552, 1.916 ], [ -1.028, 0.644 ], [ 1.628, -0.876 ], [ 0.82, 1.98 ], [ -1.256, -1.176 ] ],
    	[ [ -0.176, 1.524 ], [ 0.048, -0.968 ], [ -1.516, 0.184 ], [ -0.464, -0.808 ], [ -1.664, -1.832 ], [ -0.712, 1.032 ], [ -1.764, 1.704 ], [ -0.628, -0.968 ] ]
    ]
    [
    	[ [ 0.996, -0.176 ], [ -0.128, -1.484 ], [ 1.512, -1.1 ], [ -1.552, -1.228 ], [ 1.208, -0.768 ], [ -1.572, 1.048 ], [ -0.504, -1.492 ], [ -0.808, -1.256 ] ],
    	[ [ 1.556, -1.764 ], [ 1.62, 1.156 ], [ 1.98, -1.832 ], [ 0.688, -0.608 ], [ -0.384, -1.028 ], [ -0.892, -0.408 ], [ 0.344, 0.56 ], [ 0.66, -0.876 ] ],
    	[ [ -1.476, -0.384 ], [ -0.464, 0.092 ], [ 1.556, 1.956 ], [ -1.664, 1.776 ], [ 0.3, 0.52 ], [ -1.688, 1.524 ], [ -1.72, -1.456 ], [ -1.856, -1.34 ] ],
    	[ [ 0.812, -0.124 ], [ 1.64, 1.612 ], [ -1.564, 1.704 ], [ -1.76, 0.7 ], [ 1.764, -0.012 ], [ 0.028, -1.808 ], [ 0.692, -0.888 ], [ -1.248, 0.636 ] ],
    	[ [ 1.032, -0.032 ], [ 1.24, 1.108 ], [ -1.256, -1.16 ], [ 0.972, -1.58 ], [ 0.048, -0.892 ], [ 1.628, 0.048 ], [ -0.472, 1.552 ], [ -1.16, 1.652 ] ],
    	[ [ -0.784, 0.82 ], [ 0.52, 1.912 ], [ 0.788, 1.352 ], [ -0.316, 1.876 ], [ -1.516, 0.016 ], [ 1.916, -0.852 ], [ 1.444, -0.068 ], [ 0.82, -1.616 ] ],
    	[ [ -1.116, -0.628 ], [ 1.356, 0.012 ], [ -1.176, -1.156 ], [ -0.968, -0.804 ], [ 1.42, 0.496 ], [ -1.424, 1.324 ], [ 1.628, -0.368 ], [ -2.0, 0.392 ] ],
    	[ [ 0.08, 0.644 ], [ 1.288, -0.712 ], [ -1.656, -0.804 ], [ 0.148, -0.636 ], [ 1.368, -0.312 ], [ -1.724, -0.556 ], [ -0.968, -1.54 ], [ -0.856, 0.184 ] ]
    ]

Gradient Descent

First, we train using the basic gradient descent method, applying weak line search conditions.

TrainingTester.java:480 executed in 0.05 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 1019151124380
BACKPROP_AGG_SIZE = 3
THREADS = 64
SINGLE_THREADED = false
Initialized CoreSettings = {
"backpropAggregationSize" : 3,
"jvmThreads" : 64,
"singleThreaded" : false
}
Final threshold in iteration 0: 0.0 (> 0.0) after 0.039s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ 1.048, 0.3 ], [ 1.356, 0.048 ], [ -1.688, -1.16 ], [ -0.808, -0.408 ], [ 0.184, -0.968 ], [ 0.996, -0.316 ], [ 1.98, 0.692 ], [ 0.016, -0.888 ] ],
    	[ [ 0.148, 1.556 ], [ 0.66, -1.832 ], [ 0.636, 1.652 ], [ 1.628, -0.176 ], [ -0.012, 1.764 ], [ -0.128, -1.456 ], [ 1.62, -1.484 ], [ -0.464, -0.472 ] ],
    	[ [ -1.572, 1.628 ], [ 0.52, 1.208 ], [ -1.34, 1.352 ], [ 0.82, 0.028 ], [ -1.664, -1.228 ], [ 1.24, 0.644 ], [ -2.0, -1.424 ], [ 1.108, -1.16 ] ],
    	[ [ 0.972, -0.852 ], [ 1.776, 0.812 ], [ -0.124, 0.52 ], [ -1.476, -1.724 ], [ 1.876, -1.176 ], [ -0.628, -0.892 ], [ 0.82, -0.504 ], [ -1.552, -1.54 ] ],
    	[ [ -1.116, -1.72 ], [ -1.028, 0.788 ], [ 1.156, 1.524 ], [ 1.288, -1.1 ], [ -1.76, -0.804 ], [ -0.804, -0.556 ], [ -1.764, -1.156 ], [ 1.556, 1.444 ] ],
    	[ [ 0.496, -0.892 ], [ -0.968, -1.516 ], [ -0.856, 1.956 ], [ 1.612, -0.032 ], [ 1.42, -0.608 ], [ -1.256, -0.068 ], [ -0.784, -0.712 ], [ 0.048, 1.916 ] ],
    	[ [ 0.092, -1.58 ], [ -0.768, -0.312 ], [ -0.384, -0.636 ], [ -1.492, 0.7 ], [ 1.032, -0.384 ], [ -0.876, -0.368 ], [ -1.564, -1.808 ], [ -1.656, 1.64 ] ],
    	[ [ 1.324, 1.704 ], [ 1.512, 0.392 ], [ 0.344, -1.248 ], [ 1.552, -1.256 ], [ 0.08, 1.368 ], [ 1.912, 0.688 ], [ 0.56, -1.856 ], [ -1.616, 0.012 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ]
    ]

Conjugate Gradient Descent

First, we use a conjugate gradient descent method, which converges the fastest for purely linear functions.

TrainingTester.java:452 executed in 0.01 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new QuadraticSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 1019207694428
Final threshold in iteration 0: 0.0 (> 0.0) after 0.007s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ 1.048, 0.3 ], [ 1.356, 0.048 ], [ -1.688, -1.16 ], [ -0.808, -0.408 ], [ 0.184, -0.968 ], [ 0.996, -0.316 ], [ 1.98, 0.692 ], [ 0.016, -0.888 ] ],
    	[ [ 0.148, 1.556 ], [ 0.66, -1.832 ], [ 0.636, 1.652 ], [ 1.628, -0.176 ], [ -0.012, 1.764 ], [ -0.128, -1.456 ], [ 1.62, -1.484 ], [ -0.464, -0.472 ] ],
    	[ [ -1.572, 1.628 ], [ 0.52, 1.208 ], [ -1.34, 1.352 ], [ 0.82, 0.028 ], [ -1.664, -1.228 ], [ 1.24, 0.644 ], [ -2.0, -1.424 ], [ 1.108, -1.16 ] ],
    	[ [ 0.972, -0.852 ], [ 1.776, 0.812 ], [ -0.124, 0.52 ], [ -1.476, -1.724 ], [ 1.876, -1.176 ], [ -0.628, -0.892 ], [ 0.82, -0.504 ], [ -1.552, -1.54 ] ],
    	[ [ -1.116, -1.72 ], [ -1.028, 0.788 ], [ 1.156, 1.524 ], [ 1.288, -1.1 ], [ -1.76, -0.804 ], [ -0.804, -0.556 ], [ -1.764, -1.156 ], [ 1.556, 1.444 ] ],
    	[ [ 0.496, -0.892 ], [ -0.968, -1.516 ], [ -0.856, 1.956 ], [ 1.612, -0.032 ], [ 1.42, -0.608 ], [ -1.256, -0.068 ], [ -0.784, -0.712 ], [ 0.048, 1.916 ] ],
    	[ [ 0.092, -1.58 ], [ -0.768, -0.312 ], [ -0.384, -0.636 ], [ -1.492, 0.7 ], [ 1.032, -0.384 ], [ -0.876, -0.368 ], [ -1.564, -1.808 ], [ -1.656, 1.64 ] ],
    	[ [ 1.324, 1.704 ], [ 1.512, 0.392 ], [ 0.344, -1.248 ], [ 1.552, -1.256 ], [ 0.08, 1.368 ], [ 1.912, 0.688 ], [ 0.56, -1.856 ], [ -1.616, 0.012 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ]
    ]

Limited-Memory BFGS

Next, we apply the same optimization using L-BFGS, which is nearly ideal for purely second-order or quadratic functions.

TrainingTester.java:509 executed in 0.01 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new LBFGS());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setIterationsPerSample(100);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 1019226174990
Final threshold in iteration 0: 0.0 (> 0.0) after 0.006s (< 30.000s)

Returns

    0.0

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.00 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ 1.048, 0.3 ], [ 1.356, 0.048 ], [ -1.688, -1.16 ], [ -0.808, -0.408 ], [ 0.184, -0.968 ], [ 0.996, -0.316 ], [ 1.98, 0.692 ], [ 0.016, -0.888 ] ],
    	[ [ 0.148, 1.556 ], [ 0.66, -1.832 ], [ 0.636, 1.652 ], [ 1.628, -0.176 ], [ -0.012, 1.764 ], [ -0.128, -1.456 ], [ 1.62, -1.484 ], [ -0.464, -0.472 ] ],
    	[ [ -1.572, 1.628 ], [ 0.52, 1.208 ], [ -1.34, 1.352 ], [ 0.82, 0.028 ], [ -1.664, -1.228 ], [ 1.24, 0.644 ], [ -2.0, -1.424 ], [ 1.108, -1.16 ] ],
    	[ [ 0.972, -0.852 ], [ 1.776, 0.812 ], [ -0.124, 0.52 ], [ -1.476, -1.724 ], [ 1.876, -1.176 ], [ -0.628, -0.892 ], [ 0.82, -0.504 ], [ -1.552, -1.54 ] ],
    	[ [ -1.116, -1.72 ], [ -1.028, 0.788 ], [ 1.156, 1.524 ], [ 1.288, -1.1 ], [ -1.76, -0.804 ], [ -0.804, -0.556 ], [ -1.764, -1.156 ], [ 1.556, 1.444 ] ],
    	[ [ 0.496, -0.892 ], [ -0.968, -1.516 ], [ -0.856, 1.956 ], [ 1.612, -0.032 ], [ 1.42, -0.608 ], [ -1.256, -0.068 ], [ -0.784, -0.712 ], [ 0.048, 1.916 ] ],
    	[ [ 0.092, -1.58 ], [ -0.768, -0.312 ], [ -0.384, -0.636 ], [ -1.492, 0.7 ], [ 1.032, -0.384 ], [ -0.876, -0.368 ], [ -1.564, -1.808 ], [ -1.656, 1.64 ] ],
    	[ [ 1.324, 1.704 ], [ 1.512, 0.392 ], [ 0.344, -1.248 ], [ 1.552, -1.256 ], [ 0.08, 1.368 ], [ 1.912, 0.688 ], [ 0.56, -1.856 ], [ -1.616, 0.012 ] ]
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ],
    	[ [ 0.0, 0.0 ], [ 0.0, 0.0 ] ]
    ]

TrainingTester.java:432 executed in 0.01 seconds (0.000 gc):

    return TestUtil.compare(title + " vs Iteration", runs);
Logging
No Data

TrainingTester.java:435 executed in 0.00 seconds (0.000 gc):

    return TestUtil.compareTime(title + " vs Time", runs);
Logging
No Data

Results

TrainingTester.java:255 executed in 0.04 seconds (0.000 gc):

    return grid(inputLearning, modelLearning, completeLearning);

Returns

Result

TrainingTester.java:258 executed in 0.00 seconds (0.000 gc):

    return new ComponentResult(null == inputLearning ? null : inputLearning.value,
        null == modelLearning ? null : modelLearning.value, null == completeLearning ? null : completeLearning.value);

Returns

    {"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}

LayerTests.java:425 executed in 0.00 seconds (0.000 gc):

    throwException(exceptions.addRef());

Results

details | result
{"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}OK
  {
    "result": "OK",
    "performance": {
      "execution_time": "0.485",
      "gc_time": "0.164"
    },
    "created_on": 1586735606943,
    "file_name": "trainingTest",
    "report": {
      "simpleName": "RotatedChannels",
      "canonicalName": "com.simiacryptus.mindseye.layers.java.ImgViewLayerTest.RotatedChannels",
      "link": "https://github.com/SimiaCryptus/mindseye-java/tree/93db34cedee48c0202777a2b25deddf1dfaf5731/src/test/java/com/simiacryptus/mindseye/layers/java/ImgViewLayerTest.java",
      "javaDoc": ""
    },
    "training_analysis": {
      "input": {
        "LBFGS": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "CjGD": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "GD": {
          "type": "NonConverged",
          "value": "NaN"
        }
      }
    },
    "archive": "s3://code.simiacrypt.us/tests/com/simiacryptus/mindseye/layers/java/ImgViewLayer/RotatedChannels/trainingTest/202004125326",
    "id": "530ee095-7516-4d2b-bf64-a4988c263e67",
    "report_type": "Components",
    "display_name": "Comparative Training",
    "target": {
      "simpleName": "ImgViewLayer",
      "canonicalName": "com.simiacryptus.mindseye.layers.java.ImgViewLayer",
      "link": "https://github.com/SimiaCryptus/mindseye-java/tree/93db34cedee48c0202777a2b25deddf1dfaf5731/src/main/java/com/simiacryptus/mindseye/layers/java/ImgViewLayer.java",
      "javaDoc": ""
    }
  }