1. Test Modules
  1. Training Characteristics
    1. Input Learning
      1. Gradient Descent
      2. Conjugate Gradient Descent
      3. Limited-Memory BFGS
    2. Results
  2. Results

Subreport: Logs for com.simiacryptus.ref.lang.ReferenceCountingBase

Test Modules

Using Seed 3582629201641122816

Training Characteristics

Input Learning

In this test, we use a network to learn the target input below, given its pre-evaluated output:

TrainingTester.java:332 executed in 0.05 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(input_target)).flatMap(RefArrays::stream).map(x -> {
      try {
        return x.prettyPrint();
      } finally {
        x.freeRef();
      }
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ 1.216, 0.784, 0.424 ], [ -1.264, -0.848, -0.164 ], [ -0.588, 0.22, -0.388 ], [ -1.824, 0.708, -1.892 ], [ 0.196, -1.268, -0.308 ], [ -0.316, 1.076, 0.388 ], [ 1.4, -1.88, -1.84 ], [ 1.68, -1.228, 1.828 ], ... ],
    	[ [ -0.236, -0.96, -0.924 ], [ 0.412, -1.976, -0.684 ], [ -1.76, -1.2, 1.292 ], [ -0.52, 1.62, 0.06 ], [ 1.992, -0.648, -1.58 ], [ -1.172, 0.716, -0.744 ], [ -1.104, 1.128, 1.796 ], [ -1.184, 0.292, -1.284 ], ... ],
    	[ [ -1.056, 0.404, 1.964 ], [ 1.54, 1.332, 1.916 ], [ 0.392, 1.204, -0.288 ], [ 1.92, -1.608, 0.492 ], [ 1.288, -0.812, 1.28 ], [ -0.14, -0.46, 0.168 ], [ -1.476, -1.336, -0.872 ], [ -0.58, 0.208, -1.68 ], ... ],
    	[ [ 0.352, 1.072, 0.12 ], [ -1.144, 1.488, -0.832 ], [ -0.896, 0.072, 0.212 ], [ 1.316, -1.472, 1.12 ], [ -0.832, 1.276, 0.128 ], [ -0.864, -1.656, -0.7 ], [ -0.572, -1.544, -0.232 ], [ 0.952, 0.176, -0.06 ], ... ],
    	[ [ 0.936, 0.052, 0.236 ], [ -0.72, -0.668, -1.808 ], [ 0.112, 1.624, -0.808 ], [ 0.928, -1.596, -0.032 ], [ 1.576, 1.436, -0.412 ], [ -0.22, 0.816, 1.364 ], [ 1.732, -0.872, 0.196 ], [ 1.872, 0.316, 1.864 ], ... ],
    	[ [ 1.116, -1.324, -1.408 ], [ 0.576, 1.564, 0.98 ], [ -0.964, -1.816, 1.988 ], [ 1.588, -0.492, 1.18 ], [ 0.924, -1.568, -0.92 ], [ -0.792, 1.996, 1.232 ], [ 1.54, 0.224, -1.224 ], [ -0.8, -1.512, 0.484 ], ... ],
    	[ [ 1.068, -1.364, 0.256 ], [ -0.232, 0.204, -0.296 ], [ -0.264, -0.924, 0.812 ], [ -1.512, -0.372, 0.576 ], [ 0.428, -0.104, 0.796 ], [ -1.212, -1.32, -1.692 ], [ 1.68, -0.124, 0.304 ], [ 1.416, 0.576, -1.908 ], ... ],
    	[ [ 1.764, 1.06, -0.988 ], [ 1.196, 1.332, -0.568 ], [ -0.168, -1.56, -1.904 ], [ -0.204, 0.6, 1.7 ], [ -0.044, -1.432, 0.276 ], [ -0.176, -0.952, 1.028 ], [ -1.224, 1.344, -1.452 ], [ 0.108, -0.436, -0.912 ], ... ],
    	...
    ]
    [
    	[ [ 1.572, 0.852, -1.096 ], [ -0.364, -1.14, 0.696 ], [ -0.528, -0.836, -0.136 ], [ 1.736, 0.408, -1.06 ], [ 1.524, 1.084, -1.636 ], [ -0.5, 1.72, 1.264 ], [ -1.628, -1.984, -1.232 ], [ 0.528, -1.276, 0.488 ], ... ],
    	[ [ -1.712, 0.664, 1.008 ], [ -1.344, 1.052, 0.632 ], [ 0.524, -1.24, 1.8 ], [ 0.08, -1.196, -1.196 ], [ -1.732, -1.112, -0.372 ], [ 0.836, 0.324, 0.788 ], [ 0.528, -0.232, 1.54 ], [ 1.616, 1.484, -0.716 ], ... ],
    	[ [ -1.272, -0.228, -0.364 ], [ 0.072, 0.608, -0.076 ], [ -0.728, -1.716, -0.844 ], [ -1.476, 0.508, -0.996 ], [ -0.98, 1.22, -0.932 ], [ -1.368, 0.256, 1.604 ], [ -0.4, -0.124, 1.22 ], [ 1.04, 1.2, 1.048 ], ... ],
    	[ [ -0.74, -1.508, 1.98 ], [ 0.376, 1.908, 1.448 ], [ -0.932, 1.848, 0.088 ], [ -1.38, -1.36, 0.184 ], [ -0.94, -1.496, -0.64 ], [ -0.332, 0.444, 0.824 ], [ -0.992, 1.72, 0.82 ], [ -0.28, 1.608, 0.676 ], ... ],
    	[ [ 0.944, 1.388, -1.212 ], [ -0.324, 0.7, 1.708 ], [ -0.744, -0.608, 1.004 ], [ 1.736, 1.224, -1.532 ], [ 0.724, -1.276, -0.148 ], [ 1.972, 1.316, 1.436 ], [ 1.796, 1.872, 1.684 ], [ -0.428, 0.852, -1.664 ], ... ],
    	[ [ 0.98, -1.604, -1.18 ], [ 0.712, -0.684, -1.996 ], [ 0.768, -1.12, 0.816 ], [ 0.852, 0.216, -0.244 ], [ 0.848, -1.588, -0.06 ], [ -1.236, 1.228, 1.0 ], [ -0.244, 1.64, 1.6 ], [ 1.416, 0.052, -0.648 ], ... ],
    	[ [ 1.368, -1.268, -0.708 ], [ 0.204, 0.632, -1.244 ], [ -1.552, 1.3, 0.792 ], [ 1.868, 1.4, -0.884 ], [ 0.672, 0.288, -0.58 ], [ 0.18, 1.812, -1.468 ], [ 0.96, -1.128, -1.024 ], [ -0.744, 1.24, -1.576 ], ... ],
    	[ [ -1.808, -1.8, -0.32 ], [ -1.28, 0.212, 1.768 ], [ 1.728, 1.78, 0.0 ], [ 0.304, -1.76, -1.188 ], [ 0.124, 0.3, -0.644 ], [ -0.872, 1.952, -0.264 ], [ 0.412, -0.696, -1.652 ], [ 0.272, -1.632, -1.996 ], ... ],
    	...
    ]
    [
    	[ [ 1.116, -0.888, 1.196 ], [ 1.776, 1.292, -0.56 ], [ 0.904, 1.212, -0.036 ], [ -1.268, -0.864, -0.3 ], [ -1.392, 0.984, 0.384 ], [ -1.668, -1.784, 0.912 ], [ -0.872, 0.22, -1.364 ], [ 0.28, 0.792, -0.164 ], ... ],
    	[ [ -1.312, -1.788, 1.028 ], [ -1.656, 1.304, -1.6 ], [ -0.088, 0.856, -1.724 ], [ -1.292, -0.696, -0.636 ], [ -0.324, -0.316, 1.064 ], [ 0.02, -0.684, -0.484 ], [ 0.272, -0.02, -0.436 ], [ -0.696, -0.524, -0.516 ], ... ],
    	[ [ -1.132, 1.884, -1.42 ], [ -0.808, 0.44, -0.836 ], [ 1.156, 0.564, -1.484 ], [ 0.444, -0.352, -0.392 ], [ -1.076, 0.416, -1.208 ], [ -0.344, -0.276, 0.568 ], [ 1.036, -1.98, -1.448 ], [ 1.172, -1.744, 1.1 ], ... ],
    	[ [ -1.352, 0.384, 1.132 ], [ -0.752, 0.08, 0.82 ], [ 0.128, -0.624, 0.036 ], [ -0.036, -0.716, -1.288 ], [ 1.236, -1.548, 0.228 ], [ 0.732, -1.612, -0.084 ], [ -1.052, -1.704, 1.908 ], [ 0.776, -0.896, -1.996 ], ... ],
    	[ [ -1.344, -0.232, 0.128 ], [ 1.464, 0.232, -1.372 ], [ -1.192, 1.956, 1.092 ], [ -0.768, 1.092, -0.096 ], [ -0.988, -0.948, 0.528 ], [ -0.476, 0.224, 1.82 ], [ 0.424, 0.508, 0.06 ], [ -1.676, 0.408, 0.744 ], ... ],
    	[ [ -0.8, 0.804, 1.672 ], [ -1.748, -1.504, 1.676 ], [ -0.72, 1.144, 1.312 ], [ -0.436, 0.532, 0.016 ], [ -0.336, 0.972, -0.548 ], [ -0.684, -0.02, -1.832 ], [ 0.232, -0.924, 1.696 ], [ 0.908, 0.924, 0.236 ], ... ],
    	[ [ 0.244, -0.32, -1.304 ], [ -1.584, 0.1, 1.224 ], [ -0.988, -1.02, -1.376 ], [ -0.836, 0.804, 1.844 ], [ 0.16, -0.32, 1.16 ], [ 1.384, -1.972, -1.744 ], [ -0.136, 1.752, -0.248 ], [ -0.192, 1.124, -0.716 ], ... ],
    	[ [ -0.184, -1.636, -1.104 ], [ -0.364, 1.448, -1.2 ], [ -0.988, -1.28, 1.724 ], [ -0.48, 0.16, -1.832 ], [ -1.06, 1.944, 0.796 ], [ 0.388, -0.192, 0.616 ], [ -0.192, 1.236, -1.156 ], [ 0.104, 0.0, 0.36 ], ... ],
    	...
    ]
    [
    	[ [ -0.944, 0.24, -1.58 ], [ 0.936, 1.436, -1.18 ], [ 0.54, -0.18, -0.036 ], [ -0.048, -1.452, -0.08 ], [ 0.264, -1.26, -1.552 ], [ 0.016, 1.988, 0.048 ], [ -0.004, -0.588, -0.648 ], [ 1.76, 1.896, 0.688 ], ... ],
    	[ [ -1.672, 1.52, 0.32 ], [ 0.616, -0.164, -1.492 ], [ 1.556, 0.516, -0.816 ], [ 0.88, -0.104, -1.872 ], [ 1.144, 0.568, -1.116 ], [ 0.708, 1.036, 0.62 ], [ 1.792, 1.308, -1.516 ], [ 1.316, -1.836, 1.968 ], ... ],
    	[ [ 0.14, 0.332, -1.528 ], [ 0.46, 0.036, 1.616 ], [ -1.632, -1.336, 0.716 ], [ -1.196, 1.596, 1.712 ], [ 1.532, -1.468, -0.588 ], [ -1.444, 0.624, 0.916 ], [ 0.516, -0.132, -1.884 ], [ -1.416, 1.316, 0.916 ], ... ],
    	[ [ -1.048, 1.916, -0.352 ], [ -0.796, 1.46, -0.44 ], [ -1.288, -0.4, -1.964 ], [ 0.016, 0.684, 1.704 ], [ -0.212, 1.056, 0.156 ], [ -0.448, 0.244, -1.176 ], [ 0.12, 1.264, 1.192 ], [ 1.024, 1.976, -0.168 ], ... ],
    	[ [ 0.092, 1.536, 0.416 ], [ 0.608, -0.304, -0.152 ], [ -1.968, -1.968, 1.984 ], [ -1.2, -0.996, -1.892 ], [ -1.552, 0.924, -0.324 ], [ -0.908, -1.9, 1.088 ], [ -0.756, -0.224, 0.86 ], [ -0.376, 0.396, 0.98 ], ... ],
    	[ [ 1.204, -0.068, -1.2 ], [ -1.644, -1.868, -0.532 ], [ -0.108, -1.524, 0.504 ], [ -1.84, -1.548, 1.108 ], [ -0.5, 1.228, -0.996 ], [ -1.704, -1.828, -1.076 ], [ -1.388, 0.188, -1.704 ], [ 0.496, -0.932, -0.164 ], ... ],
    	[ [ -0.768, -0.216, 1.356 ], [ -0.308, 1.076, 1.428 ], [ -0.996, 0.816, -1.02 ], [ 1.636, 0.452, -1.032 ], [ -0.564, 0.716, 0.616 ], [ -1.592, 0.588, -0.18 ], [ -1.384, 1.236, -0.62 ], [ -0.052, -0.392, -1.14 ], ... ],
    	[ [ -0.072, 1.44, 1.048 ], [ 1.688, 1.608, 1.848 ], [ -1.828, -1.132, -0.256 ], [ -0.948, -0.9, 0.904 ], [ 0.24, -0.72, 0.232 ], [ 1.7, 0.016, 0.224 ], [ -1.204, 1.74, -1.676 ], [ -0.74, -0.06, -0.092 ], ... ],
    	...
    ]
    [
    	[ [ -0.576, -1.688, 1.944 ], [ 0.812, -0.66, 0.332 ], [ -0.272, -1.62, 1.668 ], [ 0.524, 1.828, 1.376 ], [ -0.532, 0.832, 1.22 ], [ 1.684, 1.436, 0.692 ], [ -0.492, 1.26, -0.764 ], [ 1.844, -1.668, 0.188 ], ... ],
    	[ [ -0.872, -0.432, 0.168 ], [ 1.34, -0.496, -0.864 ], [ -0.336, 0.944, 1.844 ], [ 1.408, 1.876, -0.764 ], [ -1.008, -1.744, 0.348 ], [ -0.376, -1.164, 1.408 ], [ 1.108, -0.084, 1.648 ], [ -0.76, -1.684, 0.456 ], ... ],
    	[ [ -0.472, 1.12, 1.248 ], [ -1.512, 0.52, 0.536 ], [ -0.744, -0.84, -0.944 ], [ -1.768, 1.868, 0.98 ], [ -1.368, 1.868, 1.264 ], [ 1.76, 1.628, -0.332 ], [ 0.888, -0.484, 0.396 ], [ -1.992, 1.824, -1.248 ], ... ],
    	[ [ 0.768, 1.228, 0.48 ], [ -1.324, 0.352, -1.792 ], [ 1.044, -1.852, 0.224 ], [ 1.396, 0.344, 1.164 ], [ 1.724, 1.964, 0.804 ], [ -0.476, -0.228, 0.972 ], [ 1.58, 1.348, 0.628 ], [ 0.416, 0.408, -1.832 ], ... ],
    	[ [ 0.056, -0.512, 0.42 ], [ 0.332, -0.1, -1.016 ], [ -0.572, 1.704, -1.584 ], [ -0.876, 0.28, 0.96 ], [ 0.832, -0.568, 0.084 ], [ -0.164, 0.484, 1.876 ], [ -1.404, 0.996, 1.656 ], [ -0.456, 0.168, 0.384 ], ... ],
    	[ [ -1.552, -1.808, -1.376 ], [ -1.956, -0.188, 0.444 ], [ -1.532, 0.312, 1.86 ], [ 1.752, 0.62, 0.724 ], [ 0.624, 1.144, -0.62 ], [ -1.44, -1.016, -0.872 ], [ 0.496, 0.748, 0.32 ], [ -1.092, 1.92, 1.856 ], ... ],
    	[ [ -0.284, -0.364, 0.516 ], [ -0.12, -0.82, 1.8 ], [ 1.252, 1.048, 0.948 ], [ -1.048, -1.808, -1.192 ], [ 0.576, 1.964, -1.292 ], [ -1.3, 0.116, -0.848 ], [ -0.66, -1.44, -1.612 ], [ -1.74, -1.88, -1.696 ], ... ],
    	[ [ 0.664, -1.576, 1.48 ], [ 0.388, 1.912, 0.52 ], [ 1.016, -1.216, -0.176 ], [ 1.056, 0.144, -0.26 ], [ 1.552, -1.656, 1.64 ], [ 0.024, -1.992, -1.336 ], [ -0.444, 1.5, 0.24 ], [ -1.42, 0.732, 1.416 ], ... ],
    	...
    ]
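
Conceptually, input learning is the reverse of ordinary training: the layer's weights stay fixed while the optimizer adjusts the input tensors to reproduce the pre-evaluated output. As a rough statement of the objective (the notation here is ours, not the harness's):

    \min_{x} \; \mathcal{L}\big( f_{\theta}(x),\, y^{*} \big), \qquad \theta \text{ held fixed}

where f is the layer under test (weights θ fixed), y* the pre-evaluated target output, and L the harness's training loss.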

Gradient Descent

First, we train using a basic gradient descent method with weak line search conditions.

TrainingTester.java:480 executed in 0.13 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 2048837510442
Final threshold in iteration 0: 0.0 (> 0.0) after 0.122s (< 30.000s)

Returns

    0.0
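
For reference, the weak (Armijo-Wolfe) conditions enforced by ArmijoWolfeSearch on a step size α along a descent direction p are, in standard textbook form (the constants c₁, c₂ are the usual line-search parameters; the class's defaults are not shown in this report):

    f(x + \alpha p) \le f(x) + c_1 \alpha \, \nabla f(x)^{\top} p          (sufficient decrease)
    \nabla f(x + \alpha p)^{\top} p \ge c_2 \, \nabla f(x)^{\top} p        (weak curvature)

with 0 < c₁ < c₂ < 1.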

This training run resulted in the following configuration (empty, since the layer exposes no trainable state):

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.01 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -0.336, -1.956, -0.632 ], [ 1.868, -1.34, -1.416 ], [ 0.252, 1.992, 0.316 ], [ 0.704, -0.54, 1.7 ], [ -1.636, 1.32, -1.768 ], [ -0.404, 0.74, 0.524 ], [ 0.192, 0.632, -1.736 ], [ -0.528, -0.204, 0.84 ], ... ],
    	[ [ -0.164, 1.844, -0.7 ], [ -1.1, -1.544, 0.128 ], [ 1.8, 1.808, 0.516 ], [ -1.9, 1.612, 1.204 ], [ 0.972, -0.936, -1.232 ], [ 0.684, 0.112, 0.332 ], [ 0.236, 1.78, 1.312 ], [ 0.904, 0.44, 1.004 ], ... ],
    	[ [ -1.016, 0.064, 1.352 ], [ -0.752, -0.896, 1.752 ], [ -1.072, 0.652, 1.416 ], [ -0.416, 1.828, -1.048 ], [ 1.368, 0.28, 0.916 ], [ -1.188, -1.212, -1.988 ], [ -1.904, 1.048, -0.708 ], [ 0.228, 1.868, -1.94 ], ... ],
    	[ [ -0.212, 0.356, -0.892 ], [ 1.416, 1.34, 0.828 ], [ -1.204, -1.436, 0.472 ], [ 0.692, 0.552, 1.156 ], [ 0.228, 1.028, -1.688 ], [ 1.808, -1.812, 1.432 ], [ -0.908, 1.84, 1.044 ], [ 1.956, 0.848, -1.26 ], ... ],
    	[ [ -0.604, -0.8, -1.632 ], [ -0.056, -0.196, -0.276 ], [ 1.648, 0.52, -1.204 ], [ 1.056, -0.256, -0.644 ], [ 1.064, -0.208, -0.612 ], [ 0.34, -0.616, -0.732 ], [ 0.948, -0.944, 1.772 ], [ -0.508, 0.932, 1.3 ], ... ],
    	[ [ 0.036, -0.46, -1.296 ], [ 0.468, 0.812, 1.068 ], [ 0.564, 0.988, 0.74 ], [ 0.72, 0.836, -1.592 ], [ 1.748, -1.628, -1.808 ], [ -0.948, 1.388, 0.132 ], [ -0.172, 0.48, 1.44 ], [ 1.292, -0.588, -1.832 ], ... ],
    	[ [ -0.384, 0.172, -0.508 ], [ -1.4, -0.536, 1.936 ], [ 0.384, 0.76, -1.72 ], [ -0.396, 1.916, 1.076 ], [ -0.42, -1.692, 0.644 ], [ 0.228, -1.988, 1.492 ], [ -1.824, 2.0, -0.904 ], [ 0.628, 1.0, 0.42 ], ... ],
    	[ [ -0.552, -1.12, 0.068 ], [ -1.328, -1.224, 1.36 ], [ 0.016, 0.012, 1.316 ], [ 0.3, -0.808, 0.72 ], [ 1.204, -0.296, -0.1 ], [ 1.944, 0.38, -0.988 ], [ -0.104, 1.7, 1.148 ], [ -1.244, 0.584, 1.156 ], ... ],
    	...
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 2.0, 2.0, 2.0 ] ]
    ]
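
Note that every band collapses to 2.0, the upper bound of the randomly initialized inputs, which is consistent with BandReducerLayer taking a per-band maximum over the spatial dimensions. A standalone sketch of that reduction (plain Java for illustration, not the CUDA-backed implementation; the max-pooling mode is an assumption):

    public final class BandMaxSketch {
      /**
       * Collapses an [height][width][bands] volume to one value per band
       * by taking the maximum over the spatial dimensions.
       */
      public static double[] reduceBandsByMax(double[][][] input) {
        int bands = input[0][0].length;
        double[] reduced = new double[bands];
        java.util.Arrays.fill(reduced, Double.NEGATIVE_INFINITY);
        for (double[][] row : input)
          for (double[] pixel : row)
            for (int b = 0; b < bands; b++)
              reduced[b] = Math.max(reduced[b], pixel[b]);
        return reduced; // inputs drawn from [-2, 2] reduce to ~2.0 per band
      }
    }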

Conjugate Gradient Descent

Next, we use a conjugate gradient descent method, which converges fastest on quadratic objectives (equivalently, on linear gradient systems).

TrainingTester.java:452 executed in 0.04 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new QuadraticSearch());
      iterativeTrainer.setOrientation(new GradientDescent());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 2048991244534
Final threshold in iteration 0: 0.0 (> 0.0) after 0.039s (< 30.000s)

Returns

    0.0
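
A QuadraticSearch of this kind can be read as one-dimensional parabola fitting: given φ(α) = f(x + αp), its value and slope at zero, and one probe value φ(α₁), the minimizer of the interpolating parabola is the standard quadratic-interpolation step (a textbook formula, not specific to this library):

    \alpha^{*} = \frac{-\,\varphi'(0)\,\alpha_1^{2}}{2\,\big[\varphi(\alpha_1) - \varphi(0) - \varphi'(0)\,\alpha_1\big]}

On an exactly quadratic objective this lands on the line minimum in a single step, which is why such searches pair naturally with conjugate-gradient-style methods.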

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.01 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -0.336, -1.956, -0.632 ], [ 1.868, -1.34, -1.416 ], [ 0.252, 1.992, 0.316 ], [ 0.704, -0.54, 1.7 ], [ -1.636, 1.32, -1.768 ], [ -0.404, 0.74, 0.524 ], [ 0.192, 0.632, -1.736 ], [ -0.528, -0.204, 0.84 ], ... ],
    	[ [ -0.164, 1.844, -0.7 ], [ -1.1, -1.544, 0.128 ], [ 1.8, 1.808, 0.516 ], [ -1.9, 1.612, 1.204 ], [ 0.972, -0.936, -1.232 ], [ 0.684, 0.112, 0.332 ], [ 0.236, 1.78, 1.312 ], [ 0.904, 0.44, 1.004 ], ... ],
    	[ [ -1.016, 0.064, 1.352 ], [ -0.752, -0.896, 1.752 ], [ -1.072, 0.652, 1.416 ], [ -0.416, 1.828, -1.048 ], [ 1.368, 0.28, 0.916 ], [ -1.188, -1.212, -1.988 ], [ -1.904, 1.048, -0.708 ], [ 0.228, 1.868, -1.94 ], ... ],
    	[ [ -0.212, 0.356, -0.892 ], [ 1.416, 1.34, 0.828 ], [ -1.204, -1.436, 0.472 ], [ 0.692, 0.552, 1.156 ], [ 0.228, 1.028, -1.688 ], [ 1.808, -1.812, 1.432 ], [ -0.908, 1.84, 1.044 ], [ 1.956, 0.848, -1.26 ], ... ],
    	[ [ -0.604, -0.8, -1.632 ], [ -0.056, -0.196, -0.276 ], [ 1.648, 0.52, -1.204 ], [ 1.056, -0.256, -0.644 ], [ 1.064, -0.208, -0.612 ], [ 0.34, -0.616, -0.732 ], [ 0.948, -0.944, 1.772 ], [ -0.508, 0.932, 1.3 ], ... ],
    	[ [ 0.036, -0.46, -1.296 ], [ 0.468, 0.812, 1.068 ], [ 0.564, 0.988, 0.74 ], [ 0.72, 0.836, -1.592 ], [ 1.748, -1.628, -1.808 ], [ -0.948, 1.388, 0.132 ], [ -0.172, 0.48, 1.44 ], [ 1.292, -0.588, -1.832 ], ... ],
    	[ [ -0.384, 0.172, -0.508 ], [ -1.4, -0.536, 1.936 ], [ 0.384, 0.76, -1.72 ], [ -0.396, 1.916, 1.076 ], [ -0.42, -1.692, 0.644 ], [ 0.228, -1.988, 1.492 ], [ -1.824, 2.0, -0.904 ], [ 0.628, 1.0, 0.42 ], ... ],
    	[ [ -0.552, -1.12, 0.068 ], [ -1.328, -1.224, 1.36 ], [ 0.016, 0.012, 1.316 ], [ 0.3, -0.808, 0.72 ], [ 1.204, -0.296, -0.1 ], [ 1.944, 0.38, -0.988 ], [ -0.104, 1.7, 1.148 ], [ -1.244, 0.584, 1.156 ], ... ],
    	...
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 2.0, 2.0, 2.0 ] ]
    ]

Limited-Memory BFGS

Finally, we apply the same optimization using L-BFGS, which is nearly ideal for smooth, approximately quadratic objectives.

TrainingTester.java:509 executed in 0.04 seconds (0.000 gc):

    IterativeTrainer iterativeTrainer = new IterativeTrainer(trainable.addRef());
    try {
      iterativeTrainer.setLineSearchFactory(label -> new ArmijoWolfeSearch());
      iterativeTrainer.setOrientation(new LBFGS());
      iterativeTrainer.setMonitor(TrainingTester.getMonitor(history));
      iterativeTrainer.setTimeout(30, TimeUnit.SECONDS);
      iterativeTrainer.setIterationsPerSample(100);
      iterativeTrainer.setMaxIterations(250);
      iterativeTrainer.setTerminateThreshold(0);
      return iterativeTrainer.run();
    } finally {
      iterativeTrainer.freeRef();
    }
Logging
Reset training subject: 2049056526369
Final threshold in iteration 0: 0.0 (> 0.0) after 0.040s (< 30.000s)

Returns

    0.0
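
For context, the heart of L-BFGS is the two-loop recursion, which applies an implicit inverse-Hessian approximation built from the last m curvature pairs s_i = x_{i+1} - x_i and y_i = ∇f_{i+1} - ∇f_i. A minimal, library-independent sketch (plain arrays and names are our assumptions, not the internals of the LBFGS class):

    import java.util.List;

    public final class LbfgsTwoLoopSketch {
      /** Returns the search direction -H*g from the two-loop recursion,
       *  given the current gradient and curvature pairs (oldest first). */
      public static double[] direction(double[] grad, List<double[]> s, List<double[]> y) {
        int m = s.size();
        double[] q = grad.clone();
        double[] alpha = new double[m];
        for (int i = m - 1; i >= 0; i--) {               // newest to oldest
          double rho = 1.0 / dot(y.get(i), s.get(i));
          alpha[i] = rho * dot(s.get(i), q);
          axpy(-alpha[i], y.get(i), q);                  // q -= alpha[i] * y[i]
        }
        if (m > 0) {                                     // initial scaling H0 = gamma * I
          double gamma = dot(s.get(m - 1), y.get(m - 1)) / dot(y.get(m - 1), y.get(m - 1));
          for (int j = 0; j < q.length; j++) q[j] *= gamma;
        }
        for (int i = 0; i < m; i++) {                    // oldest to newest
          double rho = 1.0 / dot(y.get(i), s.get(i));
          double beta = rho * dot(y.get(i), q);
          axpy(alpha[i] - beta, s.get(i), q);            // q += (alpha[i] - beta) * s[i]
        }
        for (int j = 0; j < q.length; j++) q[j] = -q[j]; // negate for descent
        return q;
      }

      private static double dot(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) sum += a[i] * b[i];
        return sum;
      }

      private static void axpy(double a, double[] x, double[] acc) {
        for (int i = 0; i < acc.length; i++) acc[i] += a * x[i];
      }
    }

With an empty curvature history the recursion reduces to plain steepest descent.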

This training run resulted in the following configuration:

TrainingTester.java:610 executed in 0.00 seconds (0.000 gc):

    RefList<double[]> state = network.state();
    assert state != null;
    String description = state.stream().map(RefArrays::toString).reduce((a, b) -> a + "\n" + b)
        .orElse("");
    state.freeRef();
    return description;

Returns

    

And regressed input:

TrainingTester.java:622 executed in 0.01 seconds (0.000 gc):

    return RefArrays.stream(RefUtil.addRef(data)).flatMap(x -> {
      return RefArrays.stream(x);
    }).limit(1).map(x -> {
      String temp_18_0015 = x.prettyPrint();
      x.freeRef();
      return temp_18_0015;
    }).reduce((a, b) -> a + "\n" + b).orElse("");

Returns

    [
    	[ [ -0.336, -1.956, -0.632 ], [ 1.868, -1.34, -1.416 ], [ 0.252, 1.992, 0.316 ], [ 0.704, -0.54, 1.7 ], [ -1.636, 1.32, -1.768 ], [ -0.404, 0.74, 0.524 ], [ 0.192, 0.632, -1.736 ], [ -0.528, -0.204, 0.84 ], ... ],
    	[ [ -0.164, 1.844, -0.7 ], [ -1.1, -1.544, 0.128 ], [ 1.8, 1.808, 0.516 ], [ -1.9, 1.612, 1.204 ], [ 0.972, -0.936, -1.232 ], [ 0.684, 0.112, 0.332 ], [ 0.236, 1.78, 1.312 ], [ 0.904, 0.44, 1.004 ], ... ],
    	[ [ -1.016, 0.064, 1.352 ], [ -0.752, -0.896, 1.752 ], [ -1.072, 0.652, 1.416 ], [ -0.416, 1.828, -1.048 ], [ 1.368, 0.28, 0.916 ], [ -1.188, -1.212, -1.988 ], [ -1.904, 1.048, -0.708 ], [ 0.228, 1.868, -1.94 ], ... ],
    	[ [ -0.212, 0.356, -0.892 ], [ 1.416, 1.34, 0.828 ], [ -1.204, -1.436, 0.472 ], [ 0.692, 0.552, 1.156 ], [ 0.228, 1.028, -1.688 ], [ 1.808, -1.812, 1.432 ], [ -0.908, 1.84, 1.044 ], [ 1.956, 0.848, -1.26 ], ... ],
    	[ [ -0.604, -0.8, -1.632 ], [ -0.056, -0.196, -0.276 ], [ 1.648, 0.52, -1.204 ], [ 1.056, -0.256, -0.644 ], [ 1.064, -0.208, -0.612 ], [ 0.34, -0.616, -0.732 ], [ 0.948, -0.944, 1.772 ], [ -0.508, 0.932, 1.3 ], ... ],
    	[ [ 0.036, -0.46, -1.296 ], [ 0.468, 0.812, 1.068 ], [ 0.564, 0.988, 0.74 ], [ 0.72, 0.836, -1.592 ], [ 1.748, -1.628, -1.808 ], [ -0.948, 1.388, 0.132 ], [ -0.172, 0.48, 1.44 ], [ 1.292, -0.588, -1.832 ], ... ],
    	[ [ -0.384, 0.172, -0.508 ], [ -1.4, -0.536, 1.936 ], [ 0.384, 0.76, -1.72 ], [ -0.396, 1.916, 1.076 ], [ -0.42, -1.692, 0.644 ], [ 0.228, -1.988, 1.492 ], [ -1.824, 2.0, -0.904 ], [ 0.628, 1.0, 0.42 ], ... ],
    	[ [ -0.552, -1.12, 0.068 ], [ -1.328, -1.224, 1.36 ], [ 0.016, 0.012, 1.316 ], [ 0.3, -0.808, 0.72 ], [ 1.204, -0.296, -0.1 ], [ 1.944, 0.38, -0.988 ], [ -0.104, 1.7, 1.148 ], [ -1.244, 0.584, 1.156 ], ... ],
    	...
    ]

To produce the following output:

TrainingTester.java:633 executed in 0.00 seconds (0.000 gc):

    Result[] array = ConstantResult.batchResultArray(pop(RefUtil.addRef(data)));
    @Nullable
    Result eval = layer.eval(array);
    assert eval != null;
    TensorList tensorList = Result.getData(eval);
    String temp_18_0016 = tensorList.stream().limit(1).map(x -> {
      String temp_18_0017 = x.prettyPrint();
      x.freeRef();
      return temp_18_0017;
    }).reduce((a, b) -> a + "\n" + b).orElse("");
    tensorList.freeRef();
    return temp_18_0016;

Returns

    [
    	[ [ 2.0, 2.0, 2.0 ] ]
    ]

TrainingTester.java:432 executed in 0.01 seconds (0.000 gc):

    return TestUtil.compare(title + " vs Iteration", runs);
Logging
No Data

TrainingTester.java:435 executed in 0.00 seconds (0.000 gc):

    return TestUtil.compareTime(title + " vs Time", runs);
Logging
No Data

Results

TrainingTester.java:255 executed in 0.04 seconds (0.000 gc):

    return grid(inputLearning, modelLearning, completeLearning);

Returns

Result

TrainingTester.java:258 executed in 0.00 seconds (0.000 gc):

    return new ComponentResult(null == inputLearning ? null : inputLearning.value,
        null == modelLearning ? null : modelLearning.value, null == completeLearning ? null : completeLearning.value);

Returns

    {"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}

LayerTests.java:425 executed in 0.00 seconds (0.000 gc):

    throwException(exceptions.addRef());

Results

All three methods report NonConverged (value NaN): each run measured zero error at iteration 0 (see the "Final threshold in iteration 0" log lines above), so no convergence curve was ever recorded.

Details:

    {"input":{ "LBFGS": { "type": "NonConverged", "value": NaN }, "CjGD": { "type": "NonConverged", "value": NaN }, "GD": { "type": "NonConverged", "value": NaN } }, "model":null, "complete":null}

Result: OK
  {
    "result": "OK",
    "performance": {
      "execution_time": "0.887",
      "gc_time": "0.263"
    },
    "created_on": 1586736680377,
    "file_name": "trainingTest",
    "report": {
      "simpleName": "Asymmetric",
      "canonicalName": "com.simiacryptus.mindseye.layers.cudnn.BandReducerLayerTest.Asymmetric",
      "link": "https://github.com/SimiaCryptus/mindseye-cudnn/tree/59d5b3318556370acb2d83ee6ec123ce0fc6974f/src/test/java/com/simiacryptus/mindseye/layers/cudnn/BandReducerLayerTest.java",
      "javaDoc": ""
    },
    "training_analysis": {
      "input": {
        "LBFGS": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "CjGD": {
          "type": "NonConverged",
          "value": "NaN"
        },
        "GD": {
          "type": "NonConverged",
          "value": "NaN"
        }
      }
    },
    "archive": "s3://code.simiacrypt.us/tests/com/simiacryptus/mindseye/layers/cudnn/BandReducerLayer/Asymmetric/trainingTest/202004131120",
    "id": "8b98b151-cdd8-4a42-91db-fc317569fc3f",
    "report_type": "Components",
    "display_name": "Comparative Training",
    "target": {
      "simpleName": "BandReducerLayer",
      "canonicalName": "com.simiacryptus.mindseye.layers.cudnn.BandReducerLayer",
      "link": "https://github.com/SimiaCryptus/mindseye-cudnn/tree/59d5b3318556370acb2d83ee6ec123ce0fc6974f/src/main/java/com/simiacryptus/mindseye/layers/cudnn/BandReducerLayer.java",
      "javaDoc": ""
    }
  }