4.2. Learning curve (CNN)

This section computes the learning curve of the CNN for the prediction of exchangeable potassium (K ex.): the model is trained on samples of increasing size, drawn from all Soil Taxonomy orders, and its test performance is recorded at each training-set size.

if 'google.colab' in str(get_ipython()):
    from google.colab import drive
    drive.mount('/content/drive',  force_remount=False)
    !pip install mirzai
else:
    pass  # not on Colab: assume mirzai is installed and the data paths are local
Mounted at /content/drive
Successfully installed captum-0.5.0 fonttools-4.37.1 matplotlib-3.5.3 mirzai-0.2.10
# Python utils
import math
from collections import OrderedDict
from tqdm.auto import tqdm
from pathlib import Path
from functools import partial
import pickle
import glob

# mirzai utilities
from mirzai.data.loading import load_kssl
from mirzai.data.selection import select_y, select_tax_order, select_X
from mirzai.data.transform import log_transform_y
from mirzai.data.torch import DataLoaders, SNV_transform
from mirzai.training.cnn import Model, weights_init, Learner
from mirzai.training.core import is_plateau
from mirzai.training.metrics import eval_reg

from fastcore.transform import compose

# Data science stack
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

# Deep Learning stack
import torch
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import CyclicLR

import warnings
warnings.filterwarnings('ignore')

Load and transform

src_dir = '/content/drive/MyDrive/research/predict-k-mirs-dl/data/potassium'
fnames = ['spectra-features.npy', 'spectra-wavenumbers.npy', 
          'depth-order.npy', 'target.npy', 
          'tax-order-lu.pkl', 'spectra-id.npy']

X, X_names, depth_order, y, tax_lookup, X_id = load_kssl(src_dir, fnames=fnames)

data = X, y, X_id, depth_order

transforms = [select_y, select_tax_order, select_X, log_transform_y]
X, y, X_id, depth_order = compose(*transforms)(data)
print(f'X shape: {X.shape}')
print(f'y shape: {y.shape}')
print(f'Wavenumbers:\n {X_names}')
print(f'depth_order (first 3 rows):\n {depth_order[:3, :]}')
print(f'Taxonomic order lookup:\n {tax_lookup}')
X shape: (40132, 1764)
y shape: (40132,)
Wavenumbers:
 [3999 3997 3995 ...  603  601  599]
depth_order (first 3 rows):
 [[43.  2.]
 [ 0.  0.]
 [ 0.  1.]]
Taxonomic order lookup:
 {'alfisols': 0, 'mollisols': 1, 'inceptisols': 2, 'entisols': 3, 'spodosols': 4, 'undefined': 5, 'ultisols': 6, 'andisols': 7, 'histosols': 8, 'oxisols': 9, 'vertisols': 10, 'aridisols': 11, 'gelisols': 12}
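The `compose(*transforms)(data)` call above chains the four selection/transform steps from left to right. Written out explicitly (a sketch relying on fastcore's left-to-right composition), it is equivalent to:

data = X, y, X_id, depth_order
for tfm in (select_y, select_tax_order, select_X, log_transform_y):
    data = tfm(data)  # each step consumes and returns the (X, y, X_id, depth_order) tuple
X, y, X_id, depth_order = data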

Experiment

Setup

# Is a GPU available?
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
print(f'Runtime is: {device}')

n_epochs = 151
step_size_up = 5
criterion = MSELoss() # Mean Squared Error loss
base_lr, max_lr = 3e-5, 1e-3 # Based on learning rate finder
delta = 1e-3 # Loss difference threshold for early stopping

dest_dir = Path('/content/drive/MyDrive/research/predict-k-mirs-dl/dumps/cnn/learning_curve')
Runtime is: cuda:0
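With `step_size_up = 5` and the scheduler stepped once per epoch, each triangular cycle spans 10 epochs, which is why the training log below reports the validation loss at the end of every 10th epoch. The `delta` threshold feeds the early stopper; a minimal sketch of a plateau criterion consistent with that usage (an illustrative assumption, not necessarily the actual `mirzai.training.core.is_plateau`):

def is_plateau_sketch(cycle_end_losses, delta=1e-3, verbose=False):
    # Illustrative only: stop when the last cycle-end validation loss
    # improves on the previous one by less than `delta`.
    if len(cycle_end_losses) < 2:
        return False
    improvement = cycle_end_losses[-2] - cycle_end_losses[-1]
    if verbose:
        print(f'Cycle-end improvement: {improvement:.2e}')
    return improvement < delta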
training_size = [500, 1000, 2000, 5000, 10000, 20000, 30000, X.shape[0]]
split_ratio = 0.1
seeds = range(20)
for seed in tqdm(seeds):
    perfs_by_size = OrderedDict({'seed': [], 'n_samples': [], 'test_score': [], 'n_epochs': []})
    
    for size in training_size:
        print(80*'-')
        print(f'Seed: {seed} | Size: {size}')
        print(80*'-')
        # Draw a random subset of `size` samples without replacement
        # (note: this draw itself is not seeded; only the splits below use `seed`)
        idx = np.random.choice(len(X), size, replace=False)
        
        # Train/test split
        data = train_test_split(X[idx, :], 
                                y[idx], 
                                depth_order[idx,1], 
                                test_size=split_ratio,
                                random_state=seed)
        X_train, X_test, y_train, y_test, tax_order_train, tax_order_test = data
        data_test = X_test, y_test, tax_order_test

        # Further train/valid split
        data = train_test_split(X_train, 
                                y_train,
                                tax_order_train,
                                test_size=split_ratio, 
                                random_state=seed)
        X_train, X_valid, y_train, y_valid, tax_order_train, tax_order_valid = data
        data_train = X_train, y_train, tax_order_train
        data_valid = X_valid, y_valid, tax_order_valid
        
        dls = DataLoaders(data_train, 
                          data_valid,
                          data_test,
                          transform=SNV_transform(),
                          batch_size=32)

        training_generator, validation_generator, test_generator = dls.loaders()
        
        # Modeling
        model = Model(X.shape[1], out_channel=16).to(device)
        opt = Adam(model.parameters(), lr=1e-4)
        model = model.apply(weights_init)
        scheduler = CyclicLR(opt, base_lr=base_lr, max_lr=max_lr,
                             step_size_up=step_size_up, mode='triangular',
                             cycle_momentum=False)

        early_stopper = partial(is_plateau, delta=delta, verbose=False)

        learner = Learner(model, criterion, opt, n_epochs=n_epochs, 
                          scheduler=scheduler, early_stopper=early_stopper,
                          tax_lookup=tax_lookup.values(), verbose=True)
        model, losses = learner.fit(training_generator, validation_generator)

        y_hat, y_true = learner.predict(test_generator)
        perfs = eval_reg(y_true, y_hat)

        perfs_by_size['seed'].append(seed)
        perfs_by_size['n_samples'].append(size)
        perfs_by_size['n_epochs'].append(len(losses['train']))
        perfs_by_size['test_score'].append(perfs['r2'])

    with open(dest_dir/f'cnn-lc-seed-{seed}.pickle', 'wb') as f: 
        pickle.dump(perfs_by_size, f)
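Inside the loop, `DataLoaders` applies `SNV_transform` to every spectrum. A minimal sketch of Standard Normal Variate scaling, assuming the conventional definition (the mirzai implementation may differ in details):

import numpy as np

def snv(spectrum: np.ndarray) -> np.ndarray:
    # Center and scale each spectrum individually, removing baseline
    # offsets and multiplicative scatter before it reaches the CNN.
    return (spectrum - spectrum.mean()) / spectrum.std()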
Streaming output truncated to the last 5000 lines. Representative excerpt (per-epoch lines between cycle ends elided):
------------------------------
Epoch: 6
Training loss: 0.06806543636222093 | Validation loss: 0.0665476806461811
Validation loss (ends of cycles): [0.29326259]
...
------------------------------
Epoch: 90
Training loss: 0.02780957057285885 | Validation loss: 0.03527055269009188
Validation loss (ends of cycles): [0.29326259 0.05114826 0.04478981 0.04148694 0.03923895 0.03849054
 0.03731068 0.03639809 0.03590159 0.03527055]
Early stopping!
--------------------------------------------------------------------------------
Seed: 9 | Size: 30000
--------------------------------------------------------------------------------
...
------------------------------
Epoch: 80
Training loss: 0.029702923919907524 | Validation loss: 0.03293276488342706
Validation loss (ends of cycles): [0.13150178 0.04732894 0.0418058  0.03877845 0.03655863 0.03512062
 0.03435479 0.03378799 0.03293276]
Early stopping!
--------------------------------------------------------------------------------
Seed: 9 | Size: 40132
--------------------------------------------------------------------------------
...
------------------------------
Epoch: 79
Training loss: 0.03600720611793231 | Validation loss: 0.041700294241309166
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461]
------------------------------
Epoch: 80
Training loss: 0.034843648902111224 | Validation loss: 0.03910587075273547
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 81
Training loss: 0.03538154784450674 | Validation loss: 0.041343334930396713
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 82
Training loss: 0.036538641037250776 | Validation loss: 0.04486508937799825
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 83
Training loss: 0.03746334522573908 | Validation loss: 0.04573550967054557
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 84
Training loss: 0.03906626951345426 | Validation loss: 0.04793903403050077
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 85
Training loss: 0.04067838922652643 | Validation loss: 0.04968288084244834
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 86
Training loss: 0.03920144755136603 | Validation loss: 0.04930177286465084
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 87
Training loss: 0.03762676272317621 | Validation loss: 0.05426667930673709
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 88
Training loss: 0.0362003557594845 | Validation loss: 0.045433557897278695
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 89
Training loss: 0.034699298914404604 | Validation loss: 0.041561927959586666
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587]
------------------------------
Epoch: 90
Training loss: 0.03369743091248592 | Validation loss: 0.038345360271303
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 91
Training loss: 0.034240884567046256 | Validation loss: 0.04051130935879408
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 92
Training loss: 0.03505190609088974 | Validation loss: 0.04553813698281229
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 93
Training loss: 0.036325072192249626 | Validation loss: 0.04458336504093841
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 94
Training loss: 0.038167144151727166 | Validation loss: 0.05034423378848397
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 95
Training loss: 0.03953559986795995 | Validation loss: 0.04722861444528124
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 96
Training loss: 0.038086684345539044 | Validation loss: 0.04692284245274763
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 97
Training loss: 0.036556421646180996 | Validation loss: 0.042711537298375526
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 98
Training loss: 0.035050427166224404 | Validation loss: 0.04218942583002876
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 99
Training loss: 0.033719378285198 | Validation loss: 0.04147126843773686
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536]
------------------------------
Epoch: 100
Training loss: 0.03284708765889041 | Validation loss: 0.03807422106640529
Validation loss (ends of cycles): [0.20993191 0.057927   0.05087756 0.0471809  0.04485162 0.04335153
 0.04154873 0.03996461 0.03910587 0.03834536 0.03807422]
Early stopping!
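
The run above shows the training protocol at work: the learning-rate schedule is cyclical with a cycle length of 10 epochs, the validation loss is snapshotted at the end of every cycle, and training stops early once those end-of-cycle losses stop improving. A minimal sketch of this bookkeeping follows; the function names, the patience rule, and min_delta are illustrative assumptions, since the exact stopping criterion lives inside mirzai and is not visible in this output.

import numpy as np

def train_with_cycle_tracking(train_one_epoch, validate, max_epochs=200,
                              cycle_len=10, patience=3, min_delta=1e-4):
    # Track the validation loss at the end of every learning-rate cycle
    # and stop once `patience` consecutive cycles fail to improve on the
    # best end-of-cycle loss by at least `min_delta` (assumed criterion).
    cycle_losses, best, bad_cycles = [], float('inf'), 0
    for epoch in range(max_epochs):
        train_loss = train_one_epoch(epoch)
        valid_loss = validate(epoch)
        print('-' * 30)
        print(f'Epoch: {epoch}')
        print(f'Training loss: {train_loss} | Validation loss: {valid_loss}')
        if epoch % cycle_len == 0:  # end of a cycle (epoch 0 seeds the list)
            cycle_losses.append(valid_loss)
            if valid_loss < best - min_delta:
                best, bad_cycles = valid_loss, 0
            else:
                bad_cycles += 1
        print('Validation loss (ends of cycles):', np.array(cycle_losses))
        if bad_cycles >= patience:
            print('Early stopping!')
            break
    return np.array(cycle_losses)

Basing the stopping decision only on end-of-cycle losses makes it insensitive to the within-cycle oscillation that the raw per-epoch validation losses show above.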
--------------------------------------------------------------------------------
Seed: 10 | Size: 500
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.28623229150588697 | Validation loss: 0.2876668721437454
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 1
Training loss: 0.2732484845014719 | Validation loss: 0.2794637233018875
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 2
Training loss: 0.24852608488156244 | Validation loss: 0.26532696187496185
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 3
Training loss: 0.21454166219784662 | Validation loss: 0.244778074324131
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 4
Training loss: 0.1760728319103901 | Validation loss: 0.227938711643219
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 5
Training loss: 0.13927876548125193 | Validation loss: 0.2178543582558632
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 6
Training loss: 0.1165735016648586 | Validation loss: 0.1460997760295868
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 7
Training loss: 0.1044723718212201 | Validation loss: 0.11919640749692917
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 8
Training loss: 0.09750367930302253 | Validation loss: 0.10581953823566437
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 9
Training loss: 0.09256000587573418 | Validation loss: 0.105472881346941
Validation loss (ends of cycles): [0.28766687]
------------------------------
Epoch: 10
Training loss: 0.08769601182295726 | Validation loss: 0.10440703853964806
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 11
Training loss: 0.08512237668037415 | Validation loss: 0.10394519194960594
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 12
Training loss: 0.08663272571105224 | Validation loss: 0.10312925651669502
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 13
Training loss: 0.08722404648478214 | Validation loss: 0.10252366960048676
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 14
Training loss: 0.08525520849686402 | Validation loss: 0.16640019416809082
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 15
Training loss: 0.09381065976161224 | Validation loss: 0.12001441791653633
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 16
Training loss: 0.09383911123642555 | Validation loss: 0.13745518773794174
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 17
Training loss: 0.09079675748944283 | Validation loss: 0.11658180505037308
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 18
Training loss: 0.0856318806226437 | Validation loss: 0.10903191938996315
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 19
Training loss: 0.08159097685263707 | Validation loss: 0.10695850476622581
Validation loss (ends of cycles): [0.28766687 0.10440704]
------------------------------
Epoch: 20
Training loss: 0.08112306835559699 | Validation loss: 0.10882006213068962
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 21
Training loss: 0.08342313222013988 | Validation loss: 0.11046990752220154
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 22
Training loss: 0.08023825918252651 | Validation loss: 0.10669990256428719
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 23
Training loss: 0.08050067350268364 | Validation loss: 0.11306899785995483
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 24
Training loss: 0.08110488578677177 | Validation loss: 0.10976016893982887
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 25
Training loss: 0.0816067812534479 | Validation loss: 0.10279102995991707
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 26
Training loss: 0.08068692168364158 | Validation loss: 0.12279709428548813
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 27
Training loss: 0.07764218289118546 | Validation loss: 0.10749772936105728
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 28
Training loss: 0.07489511055442002 | Validation loss: 0.10868991166353226
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 29
Training loss: 0.07361829338165429 | Validation loss: 0.10824761912226677
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006]
------------------------------
Epoch: 30
Training loss: 0.07218144604792961 | Validation loss: 0.10803351551294327
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 31
Training loss: 0.07381307562956443 | Validation loss: 0.10806496813893318
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 32
Training loss: 0.0742212373476762 | Validation loss: 0.11222716048359871
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 33
Training loss: 0.07452364036670098 | Validation loss: 0.10684147849678993
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 34
Training loss: 0.07637788527286969 | Validation loss: 0.1159149706363678
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 35
Training loss: 0.0818782881475412 | Validation loss: 0.10239430144429207
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 36
Training loss: 0.07602285765684567 | Validation loss: 0.11685087531805038
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 37
Training loss: 0.07531523704528809 | Validation loss: 0.12537145614624023
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 38
Training loss: 0.07239608867810322 | Validation loss: 0.10199717059731483
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 39
Training loss: 0.06922513475784889 | Validation loss: 0.10814457386732101
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352]
------------------------------
Epoch: 40
Training loss: 0.06899523219236961 | Validation loss: 0.1046244278550148
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 41
Training loss: 0.06658756675628516 | Validation loss: 0.1013033427298069
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 42
Training loss: 0.06902540790346953 | Validation loss: 0.10858236253261566
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 43
Training loss: 0.06836868163484794 | Validation loss: 0.1024441346526146
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 44
Training loss: 0.07284962471861106 | Validation loss: 0.10861896350979805
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 45
Training loss: 0.07337845202821952 | Validation loss: 0.11208184063434601
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 46
Training loss: 0.07648678697072543 | Validation loss: 0.10931962355971336
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 47
Training loss: 0.06974261884505932 | Validation loss: 0.13788466900587082
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 48
Training loss: 0.06619262351439549 | Validation loss: 0.11117371916770935
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 49
Training loss: 0.06400956414066829 | Validation loss: 0.10756627842783928
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443]
------------------------------
Epoch: 50
Training loss: 0.06536509698400131 | Validation loss: 0.10626817867159843
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 51
Training loss: 0.06240379179899509 | Validation loss: 0.10504582896828651
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 52
Training loss: 0.0661077255812975 | Validation loss: 0.10221891477704048
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 53
Training loss: 0.06295406073331833 | Validation loss: 0.11193358525633812
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 54
Training loss: 0.06823232712653968 | Validation loss: 0.09892533719539642
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 55
Training loss: 0.06969980093149039 | Validation loss: 0.10674293711781502
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 56
Training loss: 0.07000684480254467 | Validation loss: 0.1276056095957756
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 57
Training loss: 0.07000288166678868 | Validation loss: 0.10634531080722809
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 58
Training loss: 0.06597055552097467 | Validation loss: 0.10050374269485474
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 59
Training loss: 0.06105187821846742 | Validation loss: 0.09869374707341194
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818]
------------------------------
Epoch: 60
Training loss: 0.06096246236791977 | Validation loss: 0.10020382329821587
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 61
Training loss: 0.06011223420500755 | Validation loss: 0.10638590157032013
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 62
Training loss: 0.06001588931450477 | Validation loss: 0.10041772574186325
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 63
Training loss: 0.05770640648328341 | Validation loss: 0.10693498328328133
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 64
Training loss: 0.06082365203362245 | Validation loss: 0.10736185312271118
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 65
Training loss: 0.06504335684271959 | Validation loss: 0.10069963335990906
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 66
Training loss: 0.06329144451480645 | Validation loss: 0.12001533806324005
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 67
Training loss: 0.060021109879016876 | Validation loss: 0.13025841116905212
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 68
Training loss: 0.05991259618447377 | Validation loss: 0.1112692691385746
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 69
Training loss: 0.051523296878888056 | Validation loss: 0.10381150618195534
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382]
------------------------------
Epoch: 70
Training loss: 0.05670847801061777 | Validation loss: 0.10544414073228836
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 71
Training loss: 0.05006153193803934 | Validation loss: 0.1040196605026722
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 72
Training loss: 0.053257113752456814 | Validation loss: 0.10596621409058571
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 73
Training loss: 0.052096135914325714 | Validation loss: 0.10098161175847054
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 74
Training loss: 0.05899054528428958 | Validation loss: 0.15640872716903687
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 75
Training loss: 0.06283879165466015 | Validation loss: 0.11133874207735062
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 76
Training loss: 0.06475053956875435 | Validation loss: 0.1269955411553383
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 77
Training loss: 0.05856200307607651 | Validation loss: 0.1232197992503643
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 78
Training loss: 0.0522953188763215 | Validation loss: 0.10317911207675934
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 79
Training loss: 0.050779332335178666 | Validation loss: 0.10657905414700508
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414]
------------------------------
Epoch: 80
Training loss: 0.05063657720501606 | Validation loss: 0.10991430655121803
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 81
Training loss: 0.046154147443863064 | Validation loss: 0.12377838045358658
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 82
Training loss: 0.04866070572573405 | Validation loss: 0.11358434334397316
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 83
Training loss: 0.04941498688780344 | Validation loss: 0.1076432652771473
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 84
Training loss: 0.05149089373075045 | Validation loss: 0.1143205277621746
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 85
Training loss: 0.054084719373629644 | Validation loss: 0.1442350260913372
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 86
Training loss: 0.0523872788135822 | Validation loss: 0.14174965769052505
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 87
Training loss: 0.049203543995435424 | Validation loss: 0.10664897784590721
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 88
Training loss: 0.04649185045407368 | Validation loss: 0.11035742238163948
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 89
Training loss: 0.04299872726775133 | Validation loss: 0.11943933367729187
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431]
------------------------------
Epoch: 90
Training loss: 0.04394056762640293 | Validation loss: 0.11714010313153267
Validation loss (ends of cycles): [0.28766687 0.10440704 0.10882006 0.10803352 0.10462443 0.10626818
 0.10020382 0.10544414 0.10991431 0.1171401 ]
Early stopping!
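
Each (seed, training-set size) pair yields one such run, and the learning curve summarizes how the attainable validation loss decreases as more training examples become available. One plausible aggregation (an assumed structure, not mirzai's actual API) is to keep the best end-of-cycle validation loss per run and average it over seeds for each size; the example below reuses the cycle losses printed above for Seed 10 | Size 500.

import numpy as np

# Map (seed, size) -> end-of-cycle validation losses collected per run.
results = {
    (10, 500): np.array([0.28766687, 0.10440704, 0.10882006, 0.10803352,
                         0.10462443, 0.10626818, 0.10020382, 0.10544414,
                         0.10991431, 0.1171401]),
}

def learning_curve(results):
    # Best end-of-cycle loss per run, averaged over seeds for each size.
    by_size = {}
    for (seed, size), losses in results.items():
        by_size.setdefault(size, []).append(losses.min())
    sizes = sorted(by_size)
    return sizes, [float(np.mean(by_size[s])) for s in sizes]

sizes, mean_best_loss = learning_curve(results)
print(sizes, mean_best_loss)  # [500] [0.10020382]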
--------------------------------------------------------------------------------
Seed: 10 | Size: 1000
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.27114631694096786 | Validation loss: 0.3013191173473994
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 1
Training loss: 0.251861857680174 | Validation loss: 0.28458447754383087
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 2
Training loss: 0.21546687586949423 | Validation loss: 0.284496545791626
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 3
Training loss: 0.1699894041969226 | Validation loss: 0.21031304200490317
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 4
Training loss: 0.13458524501094452 | Validation loss: 0.08553920437892278
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 5
Training loss: 0.11567006145532314 | Validation loss: 0.14232793947060904
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 6
Training loss: 0.10996172233269765 | Validation loss: 0.10632145653168361
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 7
Training loss: 0.1118621499492572 | Validation loss: 0.08670407781998317
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 8
Training loss: 0.10521372522299106 | Validation loss: 0.09393906593322754
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 9
Training loss: 0.10288918677430886 | Validation loss: 0.08820493270953496
Validation loss (ends of cycles): [0.30131912]
------------------------------
Epoch: 10
Training loss: 0.10211293800519063 | Validation loss: 0.088161401450634
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 11
Training loss: 0.10041159792588307 | Validation loss: 0.08792162934939067
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 12
Training loss: 0.10152667130415256 | Validation loss: 0.08688186854124069
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 13
Training loss: 0.10256221446280296 | Validation loss: 0.10419053584337234
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 14
Training loss: 0.1035890977543134 | Validation loss: 0.0830913856625557
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 15
Training loss: 0.10162635978597861 | Validation loss: 0.10780499627192815
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 16
Training loss: 0.10166545398533344 | Validation loss: 0.09875310709079106
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 17
Training loss: 0.10068163562279481 | Validation loss: 0.11237562447786331
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 18
Training loss: 0.09774628596810195 | Validation loss: 0.08589743822813034
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 19
Training loss: 0.09629813982890202 | Validation loss: 0.08379857987165451
Validation loss (ends of cycles): [0.30131912 0.0881614 ]
------------------------------
Epoch: 20
Training loss: 0.0934074344829871 | Validation loss: 0.08416049679120381
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 21
Training loss: 0.09412454641782321 | Validation loss: 0.08251916865507762
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 22
Training loss: 0.09572084749547335 | Validation loss: 0.08353349069754283
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 23
Training loss: 0.09290660158372842 | Validation loss: 0.0896812950571378
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 24
Training loss: 0.09400351259570855 | Validation loss: 0.11157957216103871
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 25
Training loss: 0.09808041814428109 | Validation loss: 0.09437836209932964
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 26
Training loss: 0.09618092586214726 | Validation loss: 0.07646445681651433
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 27
Training loss: 0.09161735111131118 | Validation loss: 0.08044230192899704
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 28
Training loss: 0.08888172100369747 | Validation loss: 0.08159844825665157
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 29
Training loss: 0.08454665813881618 | Validation loss: 0.0776438241203626
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605 ]
------------------------------
Epoch: 30
Training loss: 0.08237259147258905 | Validation loss: 0.07378115753332774
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 31
Training loss: 0.08362751325162557 | Validation loss: 0.07990996291240056
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 32
Training loss: 0.083226439757989 | Validation loss: 0.10914879540602367
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 33
Training loss: 0.08491195523394988 | Validation loss: 0.07139277209838231
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 34
Training loss: 0.08520169427188543 | Validation loss: 0.08888460695743561
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 35
Training loss: 0.0905728626709718 | Validation loss: 0.08219081163406372
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 36
Training loss: 0.08954568350544342 | Validation loss: 0.07995042701562245
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 37
Training loss: 0.08245866330197224 | Validation loss: 0.10690229882796605
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 38
Training loss: 0.08004418946802616 | Validation loss: 0.0655438502629598
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 39
Training loss: 0.07748245619810544 | Validation loss: 0.07324954122304916
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116]
------------------------------
Epoch: 40
Training loss: 0.07241358321446639 | Validation loss: 0.06766582901279132
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 41
Training loss: 0.07349490660887498 | Validation loss: 0.08220083763202031
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 42
Training loss: 0.07459167777918853 | Validation loss: 0.06816854948798816
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 43
Training loss: 0.075133565813303 | Validation loss: 0.06550999979178111
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 44
Training loss: 0.07938563565795238 | Validation loss: 0.17636031409104666
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 45
Training loss: 0.08100382424890995 | Validation loss: 0.14928263425827026
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 46
Training loss: 0.08009787734884483 | Validation loss: 0.19112283488114676
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 47
Training loss: 0.07215518289460586 | Validation loss: 0.06750130653381348
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 48
Training loss: 0.06909940305810708 | Validation loss: 0.06105370571215948
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 49
Training loss: 0.06814099848270416 | Validation loss: 0.06708964208761851
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583]
------------------------------
Epoch: 50
Training loss: 0.0647653667972638 | Validation loss: 0.06590079019467036
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 51
Training loss: 0.06436607255958594 | Validation loss: 0.06134747465451559
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 52
Training loss: 0.06368346626941974 | Validation loss: 0.05555027723312378
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 53
Training loss: 0.06528307368549016 | Validation loss: 0.10233555485804875
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 54
Training loss: 0.06762558365097412 | Validation loss: 0.06919782360394795
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 55
Training loss: 0.0683538823460157 | Validation loss: 0.09502405176560084
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 56
Training loss: 0.07198400795459747 | Validation loss: 0.14405548572540283
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 57
Training loss: 0.06737143417390493 | Validation loss: 0.07190001259247462
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 58
Training loss: 0.06260795833972785 | Validation loss: 0.062167766193548836
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 59
Training loss: 0.05951648993560901 | Validation loss: 0.07571763545274734
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079]
------------------------------
Epoch: 60
Training loss: 0.05508949851187376 | Validation loss: 0.0668637715280056
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 61
Training loss: 0.052382624636475854 | Validation loss: 0.09922036528587341
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 62
Training loss: 0.056416381580325276 | Validation loss: 0.09052975972493489
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 63
Training loss: 0.057161668745371014 | Validation loss: 0.07923702895641327
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 64
Training loss: 0.06068163279157419 | Validation loss: 0.07965557028849919
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 65
Training loss: 0.06255157041148497 | Validation loss: 0.30685415863990784
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 66
Training loss: 0.06131344331571689 | Validation loss: 0.0656268410384655
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 67
Training loss: 0.06062774546444416 | Validation loss: 0.08304284016291301
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 68
Training loss: 0.05327921833556432 | Validation loss: 0.14299276967843375
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 69
Training loss: 0.05078572436020924 | Validation loss: 0.06972592696547508
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377]
------------------------------
Epoch: 70
Training loss: 0.04483967346067612 | Validation loss: 0.07364171991745631
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 71
Training loss: 0.047769702469500214 | Validation loss: 0.08219071726004283
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 72
Training loss: 0.04863498230966238 | Validation loss: 0.15322100122769675
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 73
Training loss: 0.04958262351843027 | Validation loss: 0.08630643784999847
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 74
Training loss: 0.05238688429101156 | Validation loss: 0.12523938218752542
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 75
Training loss: 0.05482054688036442 | Validation loss: 0.07240167011817296
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 76
Training loss: 0.05516412023168344 | Validation loss: 0.08454832683006923
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 77
Training loss: 0.05065158023857153 | Validation loss: 0.08039050673445065
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 78
Training loss: 0.047002577007963106 | Validation loss: 0.12330764532089233
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 79
Training loss: 0.04479805251153616 | Validation loss: 0.06863002230723698
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172]
------------------------------
Epoch: 80
Training loss: 0.042003152605432734 | Validation loss: 0.06680252775549889
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 81
Training loss: 0.040736994276253075 | Validation loss: 0.06924128532409668
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 82
Training loss: 0.04364607404344357 | Validation loss: 0.13677890847126642
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 83
Training loss: 0.04239509634387035 | Validation loss: 0.06394252677758534
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 84
Training loss: 0.048944419536453024 | Validation loss: 0.12318740785121918
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 85
Training loss: 0.050307344358700976 | Validation loss: 0.07770912845929463
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 86
Training loss: 0.0510884178085969 | Validation loss: 0.12004852046569188
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 87
Training loss: 0.044719476682635456 | Validation loss: 0.08277116964260738
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 88
Training loss: 0.0436299704015255 | Validation loss: 0.09767195334037145
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 89
Training loss: 0.038958583815166585 | Validation loss: 0.06731322159369786
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253]
------------------------------
Epoch: 90
Training loss: 0.03566374486455551 | Validation loss: 0.06604948143164317
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 91
Training loss: 0.03600069758697198 | Validation loss: 0.06159008666872978
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 92
Training loss: 0.03836567363200279 | Validation loss: 0.0633199227352937
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 93
Training loss: 0.037643212968340285 | Validation loss: 0.12101396421591441
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 94
Training loss: 0.04071113252295898 | Validation loss: 0.09344382832447688
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 95
Training loss: 0.044794661566041984 | Validation loss: 0.16241360704104105
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 96
Training loss: 0.04553248654477871 | Validation loss: 0.087947316467762
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 97
Training loss: 0.04601407094070545 | Validation loss: 0.06197854007283846
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 98
Training loss: 0.037852445259117164 | Validation loss: 0.07366656263669331
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 99
Training loss: 0.03565156388168152 | Validation loss: 0.0590630459288756
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948]
------------------------------
Epoch: 100
Training loss: 0.03224476340871591 | Validation loss: 0.0645342580974102
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 101
Training loss: 0.03184994045071877 | Validation loss: 0.06282762189706166
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 102
Training loss: 0.032962807740729586 | Validation loss: 0.06401825199524562
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 103
Training loss: 0.0340998714359907 | Validation loss: 0.07556230823198955
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 104
Training loss: 0.03651317825111059 | Validation loss: 0.07191591709852219
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 105
Training loss: 0.03656492310647781 | Validation loss: 0.1600353717803955
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 106
Training loss: 0.04045305587351322 | Validation loss: 0.08703135202328365
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 107
Training loss: 0.03643202502280474 | Validation loss: 0.07275040199359258
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 108
Training loss: 0.03348856677229588 | Validation loss: 0.06988890593250592
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 109
Training loss: 0.030310515171060197 | Validation loss: 0.061166045566399894
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426]
------------------------------
Epoch: 110
Training loss: 0.027753474978873365 | Validation loss: 0.06391521046559016
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 111
Training loss: 0.02818346696977432 | Validation loss: 0.05979646369814873
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 112
Training loss: 0.02923572829996164 | Validation loss: 0.0680171325802803
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 113
Training loss: 0.030005195894493505 | Validation loss: 0.06576111788551013
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 114
Training loss: 0.03573801576231535 | Validation loss: 0.056423703829447426
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 115
Training loss: 0.036100911119809516 | Validation loss: 0.07376208404699962
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 116
Training loss: 0.03690812213776203 | Validation loss: 0.0762839342157046
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 117
Training loss: 0.03343923941541176 | Validation loss: 0.061920154839754105
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 118
Training loss: 0.02978512293730791 | Validation loss: 0.07674552748600642
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 119
Training loss: 0.029101334512233734 | Validation loss: 0.06387490406632423
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521]
------------------------------
Epoch: 120
Training loss: 0.026124813271543153 | Validation loss: 0.06534932677944501
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 121
Training loss: 0.025870294095231935 | Validation loss: 0.06444216519594193
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 122
Training loss: 0.025658548558847263 | Validation loss: 0.06507259979844093
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 123
Training loss: 0.028533661523117468 | Validation loss: 0.06774731601277988
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 124
Training loss: 0.02891709805967716 | Validation loss: 0.0724047174056371
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 125
Training loss: 0.03162456948596697 | Validation loss: 0.06366992741823196
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 126
Training loss: 0.03268601781187149 | Validation loss: 0.0641860527296861
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 127
Training loss: 0.03037001765691317 | Validation loss: 0.07524159799019496
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 128
Training loss: 0.02665410556185704 | Validation loss: 0.06862633923689525
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 129
Training loss: 0.02404931803735403 | Validation loss: 0.06612421944737434
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933]
------------------------------
Epoch: 130
Training loss: 0.023632952441962864 | Validation loss: 0.06416553010543187
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 131
Training loss: 0.023884588565963965 | Validation loss: 0.06173599263032278
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 132
Training loss: 0.02424594580840606 | Validation loss: 0.06624979277451833
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 133
Training loss: 0.025196982762561396 | Validation loss: 0.061767312387625374
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 134
Training loss: 0.02775773348716589 | Validation loss: 0.09692556907733281
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 135
Training loss: 0.032594263123778194 | Validation loss: 0.08224088450272878
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 136
Training loss: 0.029487412207974836 | Validation loss: 0.07490686575571696
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 137
Training loss: 0.02741387515113904 | Validation loss: 0.06623867899179459
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 138
Training loss: 0.027188565940237962 | Validation loss: 0.06423299262921016
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 139
Training loss: 0.022550587363254566 | Validation loss: 0.06976987918217976
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553]
------------------------------
Epoch: 140
Training loss: 0.022771345952955577 | Validation loss: 0.06576516106724739
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 141
Training loss: 0.02289973869203375 | Validation loss: 0.0628873569269975
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 142
Training loss: 0.023526311493836917 | Validation loss: 0.07104157408078511
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 143
Training loss: 0.024283254232544165 | Validation loss: 0.08539532621701558
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 144
Training loss: 0.026763396409268562 | Validation loss: 0.06554212172826131
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 145
Training loss: 0.03026583532874401 | Validation loss: 0.11454411596059799
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 146
Training loss: 0.028293991676316813 | Validation loss: 0.08389495313167572
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 147
Training loss: 0.02770799584686756 | Validation loss: 0.06585045903921127
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 148
Training loss: 0.023221183926440202 | Validation loss: 0.06650333975752194
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 149
Training loss: 0.02247353306470009 | Validation loss: 0.08126651247342427
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516]
------------------------------
Epoch: 150
Training loss: 0.019623086751940157 | Validation loss: 0.06778747588396072
Validation loss (ends of cycles): [0.30131912 0.0881614  0.0841605  0.07378116 0.06766583 0.06590079
 0.06686377 0.07364172 0.06680253 0.06604948 0.06453426 0.06391521
 0.06534933 0.06416553 0.06576516 0.06778748]
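A note on reading these traces: the validation loss is computed every epoch, but a new entry is appended to "Validation loss (ends of cycles)" only every 10 epochs (epochs 0, 10, 20, ...), i.e. at the end of each learning-rate cycle; in between, the same array is simply reprinted unchanged. Below is a minimal sketch of this bookkeeping, assuming the 10-epoch cycle length inferred from the log; train_one_epoch and evaluate are hypothetical stand-ins, not the mirzai API.

import numpy as np

# Hypothetical stand-ins for the real per-epoch training and validation steps.
def train_one_epoch():
    return 0.02 + 0.05 * np.random.rand()   # mean training loss

def evaluate():
    return 0.06 + 0.05 * np.random.rand()   # mean validation loss

CYCLE_LEN = 10   # inferred: a new cycle-end value appears every 10 epochs
N_EPOCHS = 151   # epochs 0..150, as in the trace above

val_losses_cycle_ends = []
for epoch in range(N_EPOCHS):
    train_loss = train_one_epoch()
    val_loss = evaluate()
    if epoch % CYCLE_LEN == 0:               # end of a learning-rate cycle
        val_losses_cycle_ends.append(val_loss)
    print('-' * 30)
    print(f'Epoch: {epoch}')
    print(f'Training loss: {train_loss} | Validation loss: {val_loss}')
    print(f'Validation loss (ends of cycles): {np.array(val_losses_cycle_ends)}')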
--------------------------------------------------------------------------------
Seed: 10 | Size: 2000
--------------------------------------------------------------------------------
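Each header of this form marks one run of the learning-curve experiment: for a given random seed, a subset of the stated size is drawn from the training pool and a fresh CNN is trained from scratch on it. A sketch of the outer loop is shown below; the variable names, pool shape, and list of sizes are illustrative assumptions, not the mirzai API.

from itertools import product

import numpy as np

# Placeholder for the pool of training spectra used in the experiment.
X_pool = np.zeros((40000, 1700))

seeds = [10]          # the seed shown in the surrounding traces
sizes = [2000, 5000]  # increasing numbers of training examples

for seed, size in product(seeds, sizes):
    print('-' * 80)
    print(f'Seed: {seed} | Size: {size}')
    print('-' * 80)
    rng = np.random.default_rng(seed)
    idx = rng.choice(len(X_pool), size=size, replace=False)  # random subset
    # ... train a fresh model on X_pool[idx] and print the per-epoch log ...

As expected for a learning curve, runs with more training examples tend to reach lower validation losses: compare the end-of-cycle values of this Size 2000 run with those of the Size 5000 run further below.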
------------------------------
Epoch: 0
Training loss: 0.1618801434250439 | Validation loss: 0.1752209154268106
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 1
Training loss: 0.14574233997686237 | Validation loss: 0.15481133138140044
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 2
Training loss: 0.11868300873274897 | Validation loss: 0.11752228438854218
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 3
Training loss: 0.10331503433339737 | Validation loss: 0.11742908507585526
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 4
Training loss: 0.09970792346433097 | Validation loss: 0.10259843369325002
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 5
Training loss: 0.09703811111987806 | Validation loss: 0.103461354970932
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 6
Training loss: 0.09149395820556902 | Validation loss: 0.10043870781858762
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 7
Training loss: 0.09101413066188495 | Validation loss: 0.12704111635684967
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 8
Training loss: 0.08387162901607215 | Validation loss: 0.10820480187733968
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 9
Training loss: 0.0832572073913088 | Validation loss: 0.1032949797809124
Validation loss (ends of cycles): [0.17522092]
------------------------------
Epoch: 10
Training loss: 0.07924356764438105 | Validation loss: 0.09691472351551056
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 11
Training loss: 0.08008078003630918 | Validation loss: 0.0957989643017451
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 12
Training loss: 0.08209384254672948 | Validation loss: 0.11568646629651387
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 13
Training loss: 0.08243749655929267 | Validation loss: 0.09991084039211273
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 14
Training loss: 0.08606560250707701 | Validation loss: 0.1379929060737292
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 15
Training loss: 0.0824899130738249 | Validation loss: 0.09170163857440154
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 16
Training loss: 0.0803560282961995 | Validation loss: 0.09212791919708252
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 17
Training loss: 0.07607325809259041 | Validation loss: 0.09057196353872617
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 18
Training loss: 0.07439241616749297 | Validation loss: 0.09052857694526513
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 19
Training loss: 0.07024018408036699 | Validation loss: 0.09308399073779583
Validation loss (ends of cycles): [0.17522092 0.09691472]
------------------------------
Epoch: 20
Training loss: 0.06798860613329738 | Validation loss: 0.08868985312680404
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 21
Training loss: 0.06862006760111042 | Validation loss: 0.09170038687686126
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 22
Training loss: 0.06869713041712255 | Validation loss: 0.08607339983185132
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 23
Training loss: 0.07040536199130264 | Validation loss: 0.08300078163544337
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 24
Training loss: 0.06987133454166207 | Validation loss: 0.09090227695802848
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 25
Training loss: 0.0716366690090474 | Validation loss: 0.09582946076989174
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 26
Training loss: 0.06975229413193815 | Validation loss: 0.08876596515377362
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 27
Training loss: 0.06483151796547805 | Validation loss: 0.08653237794836362
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 28
Training loss: 0.06122077154178245 | Validation loss: 0.08586440918346246
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 29
Training loss: 0.059484159245210534 | Validation loss: 0.09282346442341805
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985]
------------------------------
Epoch: 30
Training loss: 0.05785673750820113 | Validation loss: 0.0816469881683588
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 31
Training loss: 0.0590196952004643 | Validation loss: 0.09680157403151195
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 32
Training loss: 0.05919191201089644 | Validation loss: 0.08778925302127998
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 33
Training loss: 0.05938986396672679 | Validation loss: 0.09211701527237892
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 34
Training loss: 0.061850913380290945 | Validation loss: 0.0910467089464267
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 35
Training loss: 0.06223887621479876 | Validation loss: 0.10479206964373589
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 36
Training loss: 0.06194849370741377 | Validation loss: 0.08416965107123058
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 37
Training loss: 0.05916060693562031 | Validation loss: 0.09831625409424305
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 38
Training loss: 0.05644529353023744 | Validation loss: 0.0976157213250796
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 39
Training loss: 0.05409847791580593 | Validation loss: 0.07766535754005115
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699]
------------------------------
Epoch: 40
Training loss: 0.05131756507006346 | Validation loss: 0.07610398282607396
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 41
Training loss: 0.05128737677838288 | Validation loss: 0.07781509744624297
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 42
Training loss: 0.052724012411108204 | Validation loss: 0.07516794838011265
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 43
Training loss: 0.05487711676487736 | Validation loss: 0.10514147579669952
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 44
Training loss: 0.054588435363827965 | Validation loss: 0.08979047338167827
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 45
Training loss: 0.05867484726888292 | Validation loss: 0.094602112347881
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 46
Training loss: 0.056937527313244106 | Validation loss: 0.08283252144853274
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 47
Training loss: 0.05434086024030751 | Validation loss: 0.11367898931105931
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 48
Training loss: 0.05096290885087322 | Validation loss: 0.08029752473036449
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 49
Training loss: 0.049206791336045545 | Validation loss: 0.07943053916096687
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398]
------------------------------
Epoch: 50
Training loss: 0.046280208025492875 | Validation loss: 0.07776264660060406
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 51
Training loss: 0.045652902703367026 | Validation loss: 0.079695966715614
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 52
Training loss: 0.04672223851815158 | Validation loss: 0.07726358249783516
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 53
Training loss: 0.048943011930175855 | Validation loss: 0.10645054529110591
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 54
Training loss: 0.050548168921879695 | Validation loss: 0.08036942842106025
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 55
Training loss: 0.052031887001266666 | Validation loss: 0.10596027101079623
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 56
Training loss: 0.053062821793205595 | Validation loss: 0.10486301655570666
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 57
Training loss: 0.049803431091063166 | Validation loss: 0.10336205114920934
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 58
Training loss: 0.04522683836665808 | Validation loss: 0.09069225067893665
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 59
Training loss: 0.04493647981800285 | Validation loss: 0.07809621468186378
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265]
------------------------------
Epoch: 60
Training loss: 0.04065243574772395 | Validation loss: 0.07773153235514958
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 61
Training loss: 0.04221714835833101 | Validation loss: 0.07856796185175578
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 62
Training loss: 0.04279230407201776 | Validation loss: 0.08424294305344422
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 63
Training loss: 0.04577502148116336 | Validation loss: 0.09170131136973698
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 64
Training loss: 0.046164985600055435 | Validation loss: 0.08783026970922947
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 65
Training loss: 0.047662596930475795 | Validation loss: 0.08518970385193825
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 66
Training loss: 0.04445842315680256 | Validation loss: 0.07751050901909669
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 67
Training loss: 0.043508671991088814 | Validation loss: 0.08918310577670734
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 68
Training loss: 0.04179470343332665 | Validation loss: 0.09014023592074712
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 69
Training loss: 0.04049620671453429 | Validation loss: 0.07655073702335358
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153]
------------------------------
Epoch: 70
Training loss: 0.0396222028443042 | Validation loss: 0.07584572024643421
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 71
Training loss: 0.03717943269978551 | Validation loss: 0.07880508278807004
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 72
Training loss: 0.03973317442133146 | Validation loss: 0.08252802553276221
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 73
Training loss: 0.04049243540594391 | Validation loss: 0.09000153529147308
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 74
Training loss: 0.04049573047999658 | Validation loss: 0.10002372041344643
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 75
Training loss: 0.044400962179197985 | Validation loss: 0.07380459271371365
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 76
Training loss: 0.043414482737288755 | Validation loss: 0.07823999598622322
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 77
Training loss: 0.038739253839879646 | Validation loss: 0.08262713812291622
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 78
Training loss: 0.03809597438164786 | Validation loss: 0.08333441180487473
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 79
Training loss: 0.035754169465280046 | Validation loss: 0.08620268727342288
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572]
------------------------------
Epoch: 80
Training loss: 0.03467226956112712 | Validation loss: 0.07951171075304349
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 81
Training loss: 0.03466332311212432 | Validation loss: 0.07921988517045975
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 82
Training loss: 0.03592760305778653 | Validation loss: 0.0850036001453797
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 83
Training loss: 0.036594210959532684 | Validation loss: 0.07627586275339127
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 84
Training loss: 0.04111481717258107 | Validation loss: 0.07233001788457234
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 85
Training loss: 0.040537830971765755 | Validation loss: 0.09824792668223381
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 86
Training loss: 0.03904353648278059 | Validation loss: 0.08698291952411334
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 87
Training loss: 0.03920765275902608 | Validation loss: 0.0835754635433356
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 88
Training loss: 0.03601997488123529 | Validation loss: 0.08621295168995857
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 89
Training loss: 0.03377638346351245 | Validation loss: 0.07983815545837085
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171]
------------------------------
Epoch: 90
Training loss: 0.0327324680801408 | Validation loss: 0.0762726366519928
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 91
Training loss: 0.03165368520307774 | Validation loss: 0.08496354147791862
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 92
Training loss: 0.03344015656586956 | Validation loss: 0.07823167617122333
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 93
Training loss: 0.03262264915175882 | Validation loss: 0.0848035675783952
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 94
Training loss: 0.03703135953230016 | Validation loss: 0.07858358137309551
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 95
Training loss: 0.03976100869476795 | Validation loss: 0.10528316410879295
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 96
Training loss: 0.03727892081381059 | Validation loss: 0.07412890965739886
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 97
Training loss: 0.03417914097799974 | Validation loss: 0.07945749287803967
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 98
Training loss: 0.0325212050229311 | Validation loss: 0.07739517899851005
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 99
Training loss: 0.03150020338887093 | Validation loss: 0.08356203138828278
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264]
------------------------------
Epoch: 100
Training loss: 0.029827139632520722 | Validation loss: 0.07726814846197765
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 101
Training loss: 0.029131623405013598 | Validation loss: 0.07960248303910096
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 102
Training loss: 0.029651211191187885 | Validation loss: 0.077345651263992
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 103
Training loss: 0.03169410891246562 | Validation loss: 0.07760117140909036
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 104
Training loss: 0.03362071163514081 | Validation loss: 0.08115199704964955
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 105
Training loss: 0.03455840410920335 | Validation loss: 0.09261715412139893
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 106
Training loss: 0.034202438523518106 | Validation loss: 0.08733148748675983
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 107
Training loss: 0.031660995080920996 | Validation loss: 0.08463405569394429
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 108
Training loss: 0.030696076669675464 | Validation loss: 0.07534050444761912
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 109
Training loss: 0.028993837371030274 | Validation loss: 0.08378856008251508
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815]
------------------------------
Epoch: 110
Training loss: 0.027884817408288225 | Validation loss: 0.07827197946608067
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 111
Training loss: 0.026954872278021832 | Validation loss: 0.0768817663192749
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 112
Training loss: 0.027306516892185398 | Validation loss: 0.07335130125284195
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 113
Training loss: 0.02984478463437043 | Validation loss: 0.07736792415380478
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 114
Training loss: 0.030624039343320857 | Validation loss: 0.07951478908459346
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 115
Training loss: 0.03410996135105105 | Validation loss: 0.09334786732991536
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 116
Training loss: 0.03268755740467824 | Validation loss: 0.08859992027282715
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 117
Training loss: 0.0311201336592728 | Validation loss: 0.07669213910897572
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 118
Training loss: 0.02850773309667905 | Validation loss: 0.076536084835728
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 119
Training loss: 0.026045327659185026 | Validation loss: 0.07779801202317078
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198]
------------------------------
Epoch: 120
Training loss: 0.025879897213741846 | Validation loss: 0.07725021553536256
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 121
Training loss: 0.025006736263486685 | Validation loss: 0.08035719518860181
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 122
Training loss: 0.02521455225845178 | Validation loss: 0.07811126050849755
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 123
Training loss: 0.02737539444191783 | Validation loss: 0.07882043470939
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 124
Training loss: 0.02847288306072062 | Validation loss: 0.08231854687134425
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 125
Training loss: 0.03284748344152581 | Validation loss: 0.0926503340403239
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 126
Training loss: 0.029583644267975117 | Validation loss: 0.09233872592449188
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 127
Training loss: 0.02809212899164242 | Validation loss: 0.08092666106919448
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 128
Training loss: 0.02684425281397268 | Validation loss: 0.07736630427340667
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 129
Training loss: 0.026525435640531426 | Validation loss: 0.08286652341485023
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022]
------------------------------
Epoch: 130
Training loss: 0.026220443165477586 | Validation loss: 0.07740283012390137
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 131
Training loss: 0.024342383404134537 | Validation loss: 0.08926018575827281
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 132
Training loss: 0.025032186953752648 | Validation loss: 0.09302532176176707
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 133
Training loss: 0.0252848844244784 | Validation loss: 0.07852401832739513
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 134
Training loss: 0.02585682207170655 | Validation loss: 0.08621748288472493
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 135
Training loss: 0.027297223589437848 | Validation loss: 0.09372994552055995
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 136
Training loss: 0.029613328915016324 | Validation loss: 0.07932470242182414
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 137
Training loss: 0.027742074400770898 | Validation loss: 0.08915846919020017
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 138
Training loss: 0.024932185973168586 | Validation loss: 0.09161436433593433
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 139
Training loss: 0.02556919655306082 | Validation loss: 0.09213371202349663
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283]
------------------------------
Epoch: 140
Training loss: 0.024974070051137137 | Validation loss: 0.07853892631828785
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 141
Training loss: 0.022545989748894 | Validation loss: 0.09043761218587558
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 142
Training loss: 0.02345672288142583 | Validation loss: 0.08282394955555598
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 143
Training loss: 0.024325473727110553 | Validation loss: 0.11688203240434329
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 144
Training loss: 0.026514568840902225 | Validation loss: 0.09078371028105418
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 145
Training loss: 0.027508027358528447 | Validation loss: 0.08296988283594449
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 146
Training loss: 0.027422879256454168 | Validation loss: 0.11777617782354355
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 147
Training loss: 0.024141497910022736 | Validation loss: 0.09811192005872726
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 148
Training loss: 0.024877404100170322 | Validation loss: 0.0829291803141435
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 149
Training loss: 0.02423349623659662 | Validation loss: 0.08197802864015102
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893]
------------------------------
Epoch: 150
Training loss: 0.02233779888746201 | Validation loss: 0.08017101387182872
Validation loss (ends of cycles): [0.17522092 0.09691472 0.08868985 0.08164699 0.07610398 0.07776265
 0.07773153 0.07584572 0.07951171 0.07627264 0.07726815 0.07827198
 0.07725022 0.07740283 0.07853893 0.08017101]
Early stopping!
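The "Early stopping!" message indicates this run was halted: the best end-of-cycle validation loss, 0.07584572, was recorded at epoch 70 and never beaten in the eight cycles that followed. One plausible patience-style criterion over the cycle-end losses is sketched below; the patience value is an assumption chosen so that the stop fires exactly at the sixteenth cycle end seen above, not a value taken from mirzai, whose actual rule may differ.

def should_stop(cycle_end_losses, patience=8):
    """Stop once the best cycle-end loss is `patience` or more cycles old."""
    best_idx = cycle_end_losses.index(min(cycle_end_losses))
    return len(cycle_end_losses) - 1 - best_idx >= patience

# End-of-cycle validation losses from the Seed 10 | Size 2000 run above.
losses = [0.17522092, 0.09691472, 0.08868985, 0.08164699, 0.07610398,
          0.07776265, 0.07773153, 0.07584572, 0.07951171, 0.07627264,
          0.07726815, 0.07827198, 0.07725022, 0.07740283, 0.07853893,
          0.08017101]
print(should_stop(losses))   # True: the best value (index 7) is 8 cycles old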
--------------------------------------------------------------------------------
Seed: 10 | Size: 5000
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.21679930271595482 | Validation loss: 0.20318472385406494
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 1
Training loss: 0.1668136041230104 | Validation loss: 0.11816636696457863
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 2
Training loss: 0.1115807911131795 | Validation loss: 0.11339729763567448
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 3
Training loss: 0.10118959852912295 | Validation loss: 0.12549301534891127
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 4
Training loss: 0.09859782768281426 | Validation loss: 0.10448834672570229
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 5
Training loss: 0.09670952816061147 | Validation loss: 0.10199640865127245
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 6
Training loss: 0.09333653369639802 | Validation loss: 0.1617287278175354
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 7
Training loss: 0.090622306662047 | Validation loss: 0.0932340698937575
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 8
Training loss: 0.08455809977228247 | Validation loss: 0.08920024012525876
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 9
Training loss: 0.08212190136078774 | Validation loss: 0.08311140177150568
Validation loss (ends of cycles): [0.20318472]
------------------------------
Epoch: 10
Training loss: 0.0791853968728715 | Validation loss: 0.08080732847253481
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 11
Training loss: 0.07991005939350823 | Validation loss: 0.08217759616672993
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 12
Training loss: 0.07952956224637707 | Validation loss: 0.09287703956166903
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 13
Training loss: 0.08107227450750006 | Validation loss: 0.09761275698741277
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 14
Training loss: 0.0815588255214879 | Validation loss: 0.09132462789614995
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 15
Training loss: 0.08069409350827923 | Validation loss: 0.1021335686246554
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 16
Training loss: 0.07824799048853671 | Validation loss: 0.09792800272504489
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 17
Training loss: 0.07535720156051043 | Validation loss: 0.09600358655055365
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 18
Training loss: 0.07134035119684193 | Validation loss: 0.07297346604367097
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 19
Training loss: 0.06730523676149489 | Validation loss: 0.06935091987252236
Validation loss (ends of cycles): [0.20318472 0.08080733]
------------------------------
Epoch: 20
Training loss: 0.06681856215293483 | Validation loss: 0.06999336344500383
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 21
Training loss: 0.06591901843120733 | Validation loss: 0.0713375985622406
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 22
Training loss: 0.06646224384115437 | Validation loss: 0.07043899595737457
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 23
Training loss: 0.06865779084600801 | Validation loss: 0.0846476435661316
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 24
Training loss: 0.06966026428001602 | Validation loss: 0.08409541994333267
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 25
Training loss: 0.06914794953965296 | Validation loss: 0.07454115003347397
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 26
Training loss: 0.06687090094164601 | Validation loss: 0.12878460387388865
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 27
Training loss: 0.065276972186847 | Validation loss: 0.08846696764230728
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 28
Training loss: 0.06052851867605382 | Validation loss: 0.06936634952823321
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 29
Training loss: 0.05739320233816237 | Validation loss: 0.06372248244782289
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336]
------------------------------
Epoch: 30
Training loss: 0.056034141623481054 | Validation loss: 0.06305947179595629
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 31
Training loss: 0.05621948521437607 | Validation loss: 0.0632335669050614
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 32
Training loss: 0.05801960817120207 | Validation loss: 0.06638292744755744
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 33
Training loss: 0.059636385510052285 | Validation loss: 0.07299174045523008
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 34
Training loss: 0.06075522271314944 | Validation loss: 0.06601005693276724
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 35
Training loss: 0.06310464410976631 | Validation loss: 0.07540631766120592
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 36
Training loss: 0.061312416993726894 | Validation loss: 0.0751885324716568
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 37
Training loss: 0.05711426153251036 | Validation loss: 0.07411545490225156
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 38
Training loss: 0.05412266294904581 | Validation loss: 0.06427605276306471
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 39
Training loss: 0.05176926941031546 | Validation loss: 0.060552603627244635
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947]
------------------------------
Epoch: 40
Training loss: 0.0486671953777394 | Validation loss: 0.06134359017014503
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 41
Training loss: 0.05110002699212765 | Validation loss: 0.05918064539631208
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 42
Training loss: 0.051530711455490645 | Validation loss: 0.060740908980369566
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 43
Training loss: 0.05298666653084004 | Validation loss: 0.07054131577412287
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 44
Training loss: 0.05645980859013993 | Validation loss: 0.07371640031536421
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 45
Training loss: 0.05717385922536606 | Validation loss: 0.08709836999575298
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 46
Training loss: 0.05534336075010732 | Validation loss: 0.0652000146607558
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 47
Training loss: 0.05249954535677208 | Validation loss: 0.06557390540838241
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 48
Training loss: 0.04942367346150669 | Validation loss: 0.0619909405708313
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 49
Training loss: 0.04723699968748205 | Validation loss: 0.05703059149285158
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359]
------------------------------
Epoch: 50
Training loss: 0.04512434300240569 | Validation loss: 0.057147355874379475
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 51
Training loss: 0.04565585554232748 | Validation loss: 0.058827751750747365
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 52
Training loss: 0.04680207665041676 | Validation loss: 0.06423094595472018
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 53
Training loss: 0.04919423680664517 | Validation loss: 0.07155880083640416
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 54
Training loss: 0.05087821399719696 | Validation loss: 0.06875351990262667
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 55
Training loss: 0.053621325627204 | Validation loss: 0.12530262917280197
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 56
Training loss: 0.05069674629219404 | Validation loss: 0.0649179848531882
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 57
Training loss: 0.04963773845394296 | Validation loss: 0.0683489166200161
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 58
Training loss: 0.046637567097511816 | Validation loss: 0.05715753951420387
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 59
Training loss: 0.04308643450183192 | Validation loss: 0.05415662241478761
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736]
------------------------------
Epoch: 60
Training loss: 0.04191293368688014 | Validation loss: 0.05487368342777093
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 61
Training loss: 0.04219510956982693 | Validation loss: 0.056085166583458586
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 62
Training loss: 0.04427421400983503 | Validation loss: 0.061968022212386134
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 63
Training loss: 0.045907203914729626 | Validation loss: 0.06518676156798998
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 64
Training loss: 0.04781815905387946 | Validation loss: 0.07564251323541006
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 65
Training loss: 0.04938608843569211 | Validation loss: 0.08950053478280703
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 66
Training loss: 0.047823689848653914 | Validation loss: 0.05974354532857736
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 67
Training loss: 0.0452634461928071 | Validation loss: 0.05870025667051474
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 68
Training loss: 0.042983035578971776 | Validation loss: 0.0612156942486763
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 69
Training loss: 0.040601052402511356 | Validation loss: 0.055788303911685946
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368]
------------------------------
Epoch: 70
Training loss: 0.03814817611568087 | Validation loss: 0.05492573517064254
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 71
Training loss: 0.03847261846769513 | Validation loss: 0.05461364078025023
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 72
Training loss: 0.03995766030169848 | Validation loss: 0.05567261067529519
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 73
Training loss: 0.04270468235719861 | Validation loss: 0.06666113883256912
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 74
Training loss: 0.043942760514814085 | Validation loss: 0.059602606544891995
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 75
Training loss: 0.04800796254325335 | Validation loss: 0.06048930970331033
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 76
Training loss: 0.044324284302085404 | Validation loss: 0.058276225626468656
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 77
Training loss: 0.04246668216193051 | Validation loss: 0.06340857942899068
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 78
Training loss: 0.039575646829417374 | Validation loss: 0.06758413364489874
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 79
Training loss: 0.037527156377753876 | Validation loss: 0.0539743257065614
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574]
------------------------------
Epoch: 80
Training loss: 0.03588872146976041 | Validation loss: 0.05481154695153236
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 81
Training loss: 0.03649109230912107 | Validation loss: 0.054990808169047035
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 82
Training loss: 0.03719184185400253 | Validation loss: 0.06046265438199043
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 83
Training loss: 0.03993967960141306 | Validation loss: 0.05727590061724186
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 84
Training loss: 0.040751541143385916 | Validation loss: 0.06313362121582031
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 85
Training loss: 0.04366428489378822 | Validation loss: 0.06349676909546058
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 86
Training loss: 0.04226917906950309 | Validation loss: 0.056340149914224945
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 87
Training loss: 0.03940869762202886 | Validation loss: 0.06956625630458196
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 88
Training loss: 0.037179867033003355 | Validation loss: 0.056858163326978683
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 89
Training loss: 0.03474178528926504 | Validation loss: 0.054771875590085985
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155]
------------------------------
Epoch: 90
Training loss: 0.03365579730735754 | Validation loss: 0.05424569162229697
Validation loss (ends of cycles): [0.20318472 0.08080733 0.06999336 0.06305947 0.06134359 0.05714736
 0.05487368 0.05492574 0.05481155 0.05424569]
Early stopping!
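
The "ends of cycles" array above grows by one entry every 10 epochs (epochs 0, 10, 20, ...), and within each cycle the validation loss typically rises after the restart before dipping again, which is consistent with a cyclical learning-rate schedule with warm restarts. The sketch below shows one way to reproduce this bookkeeping with PyTorch's built-in CosineAnnealingWarmRestarts; the model, data, loss, and scheduler choice are placeholders, not mirzai's actual implementation.

import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

# Dummy model and data so the loop runs end to end; in the notebook these
# would be the spectral CNN and the loaders for the current seed/size.
model = nn.Linear(1751, 1)  # 1751 input features is a hypothetical spectrum length
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
sched = CosineAnnealingWarmRestarts(opt, T_0=10)  # restart every 10 epochs
x, y = torch.randn(64, 1751), torch.randn(64, 1)

cycle_ends = []
for epoch in range(30):
    loss = nn.functional.mse_loss(model(x), y)
    opt.zero_grad()
    loss.backward()
    opt.step()
    sched.step()
    val_loss = float(loss)  # stand-in for a proper validation pass
    if epoch % 10 == 0:  # same cadence as the log: epochs 0, 10, 20, ...
        cycle_ends.append(val_loss)
print(cycle_ends)
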
--------------------------------------------------------------------------------
Seed: 10 | Size: 10000
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.3208344729514573 | Validation loss: 0.318280567382944
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 1
Training loss: 0.19803376548637555 | Validation loss: 0.13973277755852404
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 2
Training loss: 0.10369020112328173 | Validation loss: 0.1150948526016597
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 3
Training loss: 0.09644094917659216 | Validation loss: 0.10184345497139569
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 4
Training loss: 0.09403398349939839 | Validation loss: 0.1028368263665972
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 5
Training loss: 0.09193474704062375 | Validation loss: 0.1109759640590898
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 6
Training loss: 0.08814766762528832 | Validation loss: 0.09330829733918453
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 7
Training loss: 0.0837278733484623 | Validation loss: 0.08856733438783679
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 8
Training loss: 0.08128497258239374 | Validation loss: 0.08120295495308678
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 9
Training loss: 0.07837615451064166 | Validation loss: 0.07797302755302396
Validation loss (ends of cycles): [0.31828057]
------------------------------
Epoch: 10
Training loss: 0.07498718809893751 | Validation loss: 0.07726067600065264
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 11
Training loss: 0.07560104029033128 | Validation loss: 0.07749038266724553
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 12
Training loss: 0.07756251360722415 | Validation loss: 0.08580444444870126
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 13
Training loss: 0.07727028810837137 | Validation loss: 0.0858236273814892
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 14
Training loss: 0.07767378302716364 | Validation loss: 0.08244718315786329
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 15
Training loss: 0.07831902486660819 | Validation loss: 0.08229162158637211
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 16
Training loss: 0.07570499881339354 | Validation loss: 0.07988743625324347
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 17
Training loss: 0.07201992783431463 | Validation loss: 0.07582339567357096
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 18
Training loss: 0.07011749405472532 | Validation loss: 0.07034371587736853
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 19
Training loss: 0.06716613902703045 | Validation loss: 0.0669062534539864
Validation loss (ends of cycles): [0.31828057 0.07726068]
------------------------------
Epoch: 20
Training loss: 0.06499362923938223 | Validation loss: 0.06683127569227383
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 21
Training loss: 0.06468080149919499 | Validation loss: 0.06583795465272048
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 22
Training loss: 0.06689566351825327 | Validation loss: 0.06987852204976411
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 23
Training loss: 0.06782534108387203 | Validation loss: 0.07301809394667888
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 24
Training loss: 0.06932942601522123 | Validation loss: 0.07833132944230375
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 25
Training loss: 0.0697620217138388 | Validation loss: 0.07771215652083528
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 26
Training loss: 0.06711326695714645 | Validation loss: 0.07463829681791108
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 27
Training loss: 0.06510572683975452 | Validation loss: 0.07238342150531966
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 28
Training loss: 0.06255879405680603 | Validation loss: 0.06996388707695336
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 29
Training loss: 0.05977157332501778 | Validation loss: 0.06343879846149478
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128]
------------------------------
Epoch: 30
Training loss: 0.057576970637255295 | Validation loss: 0.06144785264442707
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 31
Training loss: 0.05884648127642673 | Validation loss: 0.061254425552384605
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 32
Training loss: 0.06008706400244255 | Validation loss: 0.06730351057545893
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 33
Training loss: 0.06149038758979538 | Validation loss: 0.06669318663149044
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 34
Training loss: 0.06343353877768038 | Validation loss: 0.08957863676136937
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 35
Training loss: 0.06404111976551963 | Validation loss: 0.07944964103657624
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 36
Training loss: 0.06280158764470047 | Validation loss: 0.08468614403029968
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 37
Training loss: 0.06029054083986076 | Validation loss: 0.06408045762057962
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 38
Training loss: 0.05761603463265136 | Validation loss: 0.05974189076444198
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 39
Training loss: 0.05507340776462724 | Validation loss: 0.05976667499233936
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785]
------------------------------
Epoch: 40
Training loss: 0.053342985795942814 | Validation loss: 0.05916541262433447
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 41
Training loss: 0.054149042385974976 | Validation loss: 0.059428418761697306
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 42
Training loss: 0.05521612206664611 | Validation loss: 0.06161621251496775
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 43
Training loss: 0.05741074803013971 | Validation loss: 0.07564911323374715
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 44
Training loss: 0.059064230778965894 | Validation loss: 0.08295242616842533
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 45
Training loss: 0.06096260105149718 | Validation loss: 0.0736706999355349
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 46
Training loss: 0.058049106735765466 | Validation loss: 0.08584050705720639
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 47
Training loss: 0.05583139895424834 | Validation loss: 0.06207926982435687
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 48
Training loss: 0.053796297921909125 | Validation loss: 0.06523714240255027
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 49
Training loss: 0.05198358270654997 | Validation loss: 0.05872965193000333
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541]
------------------------------
Epoch: 50
Training loss: 0.0499456028314674 | Validation loss: 0.05686888162945879
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 51
Training loss: 0.05126144335876534 | Validation loss: 0.056591979195845535
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 52
Training loss: 0.05224344710724091 | Validation loss: 0.05856672815721611
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 53
Training loss: 0.054411778094496314 | Validation loss: 0.07042139074925718
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 54
Training loss: 0.055745888523405465 | Validation loss: 0.06894256778318307
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 55
Training loss: 0.05707524980117721 | Validation loss: 0.06330927410002413
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 56
Training loss: 0.05516272258952143 | Validation loss: 0.06743599166130197
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 57
Training loss: 0.05250089465985148 | Validation loss: 0.06211624667048454
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 58
Training loss: 0.051365907509319894 | Validation loss: 0.06060262486852448
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 59
Training loss: 0.04909465120973315 | Validation loss: 0.05675130376014216
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888]
------------------------------
Epoch: 60
Training loss: 0.04706054231826597 | Validation loss: 0.05575629599906247
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 61
Training loss: 0.04808880996296373 | Validation loss: 0.05616617549596162
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 62
Training loss: 0.04886479155109154 | Validation loss: 0.05892433553677181
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 63
Training loss: 0.05041125721085494 | Validation loss: 0.06897250832668667
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 64
Training loss: 0.05188799403813176 | Validation loss: 0.062462263580026295
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 65
Training loss: 0.054635593799624856 | Validation loss: 0.08355555824678519
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 66
Training loss: 0.0523650164781945 | Validation loss: 0.06177988340114725
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 67
Training loss: 0.051539598399494575 | Validation loss: 0.05858155166537597
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 68
Training loss: 0.04865856434211252 | Validation loss: 0.058884155133674884
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 69
Training loss: 0.04666972260071537 | Validation loss: 0.05566656884962115
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563 ]
------------------------------
Epoch: 70
Training loss: 0.044823936340729086 | Validation loss: 0.054462554125950254
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 71
Training loss: 0.0449102942824481 | Validation loss: 0.054748651667915545
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 72
Training loss: 0.046292417118047165 | Validation loss: 0.059005388540440594
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 73
Training loss: 0.048082990483857516 | Validation loss: 0.05723131852674073
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 74
Training loss: 0.04956973357287448 | Validation loss: 0.05968912909257001
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 75
Training loss: 0.051790501929702255 | Validation loss: 0.05894576591150514
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 76
Training loss: 0.049998988016090526 | Validation loss: 0.059786687123364414
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 77
Training loss: 0.048110113376531545 | Validation loss: 0.061614472932856656
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 78
Training loss: 0.04663481412631437 | Validation loss: 0.06338580736312373
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 79
Training loss: 0.04410836506473619 | Validation loss: 0.05611117794338999
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255]
------------------------------
Epoch: 80
Training loss: 0.04312163801849123 | Validation loss: 0.05400283981500001
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 81
Training loss: 0.043554561714986416 | Validation loss: 0.056934723437860095
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 82
Training loss: 0.044636147390083064 | Validation loss: 0.0660295981014597
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 83
Training loss: 0.0459298248894102 | Validation loss: 0.06809257998548705
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 84
Training loss: 0.04766980750764918 | Validation loss: 0.06455176233731467
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 85
Training loss: 0.04948333185899446 | Validation loss: 0.08151760183531663
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 86
Training loss: 0.04826224101780672 | Validation loss: 0.07454486461035137
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 87
Training loss: 0.04570534480662327 | Validation loss: 0.058246461867258466
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 88
Training loss: 0.04386360907384495 | Validation loss: 0.058009152525457845
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 89
Training loss: 0.04169096091512855 | Validation loss: 0.054397762720954826
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284]
------------------------------
Epoch: 90
Training loss: 0.041218897184782374 | Validation loss: 0.05259814074841039
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 91
Training loss: 0.040955901549263735 | Validation loss: 0.05439365038584019
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 92
Training loss: 0.042126013860693125 | Validation loss: 0.054666623344709134
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 93
Training loss: 0.043382279897534 | Validation loss: 0.054504195998968746
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 94
Training loss: 0.0458415110822855 | Validation loss: 0.07741936720136938
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 95
Training loss: 0.04743661339886076 | Validation loss: 0.05820449994042002
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 96
Training loss: 0.04553815280270623 | Validation loss: 0.08208692163742821
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 97
Training loss: 0.044244329628395286 | Validation loss: 0.05583971597511193
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 98
Training loss: 0.04123691922625688 | Validation loss: 0.05591851151708899
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 99
Training loss: 0.04009729196266161 | Validation loss: 0.05433603845022876
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814]
------------------------------
Epoch: 100
Training loss: 0.03822168787780005 | Validation loss: 0.053116938934243955
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 101
Training loss: 0.038870895644429866 | Validation loss: 0.054241204441621386
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 102
Training loss: 0.04008115804937529 | Validation loss: 0.055827350130882754
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 103
Training loss: 0.041735652746178034 | Validation loss: 0.057072933634807324
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 104
Training loss: 0.043364248979162044 | Validation loss: 0.05602561297087834
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 105
Training loss: 0.045468564585261925 | Validation loss: 0.056964905804087376
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 106
Training loss: 0.044400842112349716 | Validation loss: 0.05765183186479684
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 107
Training loss: 0.04242724046023108 | Validation loss: 0.05811212301768105
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 108
Training loss: 0.03993679110811451 | Validation loss: 0.05642743761940249
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 109
Training loss: 0.03817879260099662 | Validation loss: 0.05428962120465163
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694]
------------------------------
Epoch: 110
Training loss: 0.03728278963569933 | Validation loss: 0.05238519949388915
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 111
Training loss: 0.0370556244765973 | Validation loss: 0.05351860776286701
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 112
Training loss: 0.03803750030755058 | Validation loss: 0.05330655434779052
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 113
Training loss: 0.03953701080974397 | Validation loss: 0.05610055540656221
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 114
Training loss: 0.042156051558361746 | Validation loss: 0.056494539603590965
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 115
Training loss: 0.04371086021107951 | Validation loss: 0.059335493065159894
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 116
Training loss: 0.04238700263172857 | Validation loss: 0.0607527428916816
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 117
Training loss: 0.04007319592702107 | Validation loss: 0.059337090572406506
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 118
Training loss: 0.03805773765376703 | Validation loss: 0.059619187216820385
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 119
Training loss: 0.036783665865953045 | Validation loss: 0.05485150924530523
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852 ]
------------------------------
Epoch: 120
Training loss: 0.03522829567705552 | Validation loss: 0.05326138205569366
Validation loss (ends of cycles): [0.31828057 0.07726068 0.06683128 0.06144785 0.05916541 0.05686888
 0.0557563  0.05446255 0.05400284 0.05259814 0.05311694 0.0523852
 0.05326138]
Early stopping!
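
Each run halts with "Early stopping!" once the cycle-end validation losses stop improving. The exact criterion is not shown in the log, but a generic patience-style rule is consistent with the Seed 10 | Size 10000 run above, whose last cycle-end loss (0.05326138) is worse than the best one recorded (0.0523852). A minimal sketch, using those logged values:

import numpy as np

def stop_early(cycle_ends, patience=1):
    # Illustrative patience rule on cycle-end validation losses, not
    # necessarily mirzai's exact criterion: stop once the `patience` most
    # recent cycle-end losses are all worse than the best loss before them.
    losses = np.asarray(cycle_ends)
    if len(losses) <= patience:
        return False
    return bool(losses[-patience:].min() > losses[:-patience].min())

# Cycle-end losses of the run above (Seed: 10 | Size: 10000)
run = [0.31828057, 0.07726068, 0.06683128, 0.06144785, 0.05916541,
       0.05686888, 0.0557563, 0.05446255, 0.05400284, 0.05259814,
       0.05311694, 0.0523852, 0.05326138]
print(stop_early(run))  # True
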
--------------------------------------------------------------------------------
Seed: 10 | Size: 20000
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.1937587873558321 | Validation loss: 0.16695564253288403
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 1
Training loss: 0.10741124820162559 | Validation loss: 0.09033931210114245
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 2
Training loss: 0.0909428682097434 | Validation loss: 0.08505091312945935
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 3
Training loss: 0.08447369038911731 | Validation loss: 0.08490002619331344
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 4
Training loss: 0.08043632089459803 | Validation loss: 0.0835269209193556
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 5
Training loss: 0.07768446769146524 | Validation loss: 0.07836407364199036
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 6
Training loss: 0.07283868803413074 | Validation loss: 0.07246115702416814
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 7
Training loss: 0.06800214434134419 | Validation loss: 0.07295098345269237
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 8
Training loss: 0.06450905432627047 | Validation loss: 0.06473837368059576
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 9
Training loss: 0.06113856292056026 | Validation loss: 0.06293978251255396
Validation loss (ends of cycles): [0.16695564]
------------------------------
Epoch: 10
Training loss: 0.05838155703843228 | Validation loss: 0.058973234833071105
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 11
Training loss: 0.0590651414707979 | Validation loss: 0.06184358401387407
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 12
Training loss: 0.06017314939147975 | Validation loss: 0.06425718737668112
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 13
Training loss: 0.061172162533482385 | Validation loss: 0.06329335121993433
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 14
Training loss: 0.06214155544043763 | Validation loss: 0.06571824806170505
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 15
Training loss: 0.062408381161928414 | Validation loss: 0.06477978142599265
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 16
Training loss: 0.05940165142556267 | Validation loss: 0.06300883746722288
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 17
Training loss: 0.05636550784772317 | Validation loss: 0.06063326899158327
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 18
Training loss: 0.053919580950598275 | Validation loss: 0.05690223664829606
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 19
Training loss: 0.05147737494249198 | Validation loss: 0.054520211787077416
Validation loss (ends of cycles): [0.16695564 0.05897323]
------------------------------
Epoch: 20
Training loss: 0.049701670721114505 | Validation loss: 0.051985655927605796
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 21
Training loss: 0.050548364601191685 | Validation loss: 0.05337116988212393
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 22
Training loss: 0.05188654155949869 | Validation loss: 0.055069943040347936
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 23
Training loss: 0.05248021238522064 | Validation loss: 0.05857947808608674
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 24
Training loss: 0.05433177806554579 | Validation loss: 0.055786194933349624
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 25
Training loss: 0.05556651474852886 | Validation loss: 0.05825148323518142
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 26
Training loss: 0.053006139650015084 | Validation loss: 0.05651293652491611
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 27
Training loss: 0.0507659353499523 | Validation loss: 0.054801122726578465
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 28
Training loss: 0.048554422462421525 | Validation loss: 0.05305930501536319
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 29
Training loss: 0.04574381269117784 | Validation loss: 0.0514153082315859
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566]
------------------------------
Epoch: 30
Training loss: 0.04418007252991376 | Validation loss: 0.048620141879246945
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 31
Training loss: 0.045360717239111836 | Validation loss: 0.05021227791643979
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 32
Training loss: 0.046448042166200736 | Validation loss: 0.05336992553713029
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 33
Training loss: 0.04778991164217099 | Validation loss: 0.05832194040218989
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 34
Training loss: 0.04944647156451581 | Validation loss: 0.0614949915100608
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 35
Training loss: 0.050366569427990115 | Validation loss: 0.06288488016447477
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 36
Training loss: 0.048872271721679315 | Validation loss: 0.05419730365668472
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 37
Training loss: 0.04679563639711229 | Validation loss: 0.05351380425456323
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 38
Training loss: 0.04441320958236853 | Validation loss: 0.05135479059658552
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 39
Training loss: 0.042865205434916995 | Validation loss: 0.04892440996410554
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014]
------------------------------
Epoch: 40
Training loss: 0.04114158953405932 | Validation loss: 0.04640055317104908
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 41
Training loss: 0.04168075290407537 | Validation loss: 0.04898981227163683
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 42
Training loss: 0.04272683222339116 | Validation loss: 0.04995410621427653
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 43
Training loss: 0.04432621974813985 | Validation loss: 0.0549667902421533
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 44
Training loss: 0.04593426139044338 | Validation loss: 0.05504765328869485
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 45
Training loss: 0.04727091159693588 | Validation loss: 0.054771556371920986
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 46
Training loss: 0.045680517350971936 | Validation loss: 0.05265137781960923
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 47
Training loss: 0.04370591644550392 | Validation loss: 0.05152795839597259
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 48
Training loss: 0.04130219880866229 | Validation loss: 0.05182074438453766
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 49
Training loss: 0.03948871645878053 | Validation loss: 0.04831022197347984
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055]
------------------------------
Epoch: 50
Training loss: 0.03837553308303128 | Validation loss: 0.044728756146995646
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 51
Training loss: 0.03918741399484744 | Validation loss: 0.04656248438384449
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 52
Training loss: 0.040425572272470955 | Validation loss: 0.048068265806426085
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 53
Training loss: 0.041100266152585045 | Validation loss: 0.048309058506499254
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 54
Training loss: 0.043417033105647776 | Validation loss: 0.05213266104590474
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 55
Training loss: 0.044981646573784555 | Validation loss: 0.05113487236463187
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 56
Training loss: 0.04303855704661657 | Validation loss: 0.0511682754741949
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 57
Training loss: 0.04084054631395982 | Validation loss: 0.04861888168543054
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 58
Training loss: 0.038960600914745874 | Validation loss: 0.04695257093561323
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 59
Training loss: 0.03764710921970521 | Validation loss: 0.04477690320396632
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876]
------------------------------
Epoch: 60
Training loss: 0.03553954229912403 | Validation loss: 0.043018419851075136
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 61
Training loss: 0.03677014895959131 | Validation loss: 0.045161824052532516
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 62
Training loss: 0.03763243454073131 | Validation loss: 0.047717598740730366
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 63
Training loss: 0.039285847094255325 | Validation loss: 0.04886620066929282
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 64
Training loss: 0.041000166599302605 | Validation loss: 0.05117675080372576
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 65
Training loss: 0.042431575734778154 | Validation loss: 0.05887748026534131
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 66
Training loss: 0.04095806826559985 | Validation loss: 0.04888384812103029
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 67
Training loss: 0.03861006007105465 | Validation loss: 0.04978298546190847
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 68
Training loss: 0.03706407239862683 | Validation loss: 0.04655743958918672
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 69
Training loss: 0.03537350038481652 | Validation loss: 0.044241009476153476
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842]
------------------------------
Epoch: 70
Training loss: 0.033808498057358596 | Validation loss: 0.04274106940679383
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 71
Training loss: 0.03488694527048684 | Validation loss: 0.04366510390843216
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 72
Training loss: 0.03567285149099206 | Validation loss: 0.04591469525506622
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 73
Training loss: 0.03710445848567305 | Validation loss: 0.048434995899074955
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 74
Training loss: 0.03909004668482897 | Validation loss: 0.04879440321472653
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 75
Training loss: 0.0402635611626788 | Validation loss: 0.049998423476752485
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 76
Training loss: 0.03877175844933919 | Validation loss: 0.047474542403953116
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 77
Training loss: 0.03686592971108839 | Validation loss: 0.049321869518934636
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 78
Training loss: 0.03513916212830259 | Validation loss: 0.04538330256023951
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 79
Training loss: 0.03325431909195388 | Validation loss: 0.04405715048574565
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107]
------------------------------
Epoch: 80
Training loss: 0.032414121551464885 | Validation loss: 0.041407169022581035
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 81
Training loss: 0.03273387079694744 | Validation loss: 0.04332864193017023
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 82
Training loss: 0.03379245271219246 | Validation loss: 0.04393824611447359
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 83
Training loss: 0.036216145730347794 | Validation loss: 0.04601557598563663
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 84
Training loss: 0.03738668957390846 | Validation loss: 0.0478383689269162
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 85
Training loss: 0.03850440040260142 | Validation loss: 0.046575586066434256
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 86
Training loss: 0.037245836072184395 | Validation loss: 0.04904622127089584
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 87
Training loss: 0.035356279124581134 | Validation loss: 0.04652073737560657
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 88
Training loss: 0.034129417027462514 | Validation loss: 0.04397078325743215
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 89
Training loss: 0.032686933013564616 | Validation loss: 0.04174699555886419
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717]
------------------------------
Epoch: 90
Training loss: 0.03151373515232545 | Validation loss: 0.040432571188399664
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 91
Training loss: 0.031794840381783845 | Validation loss: 0.04145001789979767
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 92
Training loss: 0.03241383722070348 | Validation loss: 0.04404820838387598
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 93
Training loss: 0.034046458082247884 | Validation loss: 0.045810290955399215
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 94
Training loss: 0.03595437882449735 | Validation loss: 0.059114713091076465
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 95
Training loss: 0.037035941107738654 | Validation loss: 0.047888670345408876
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 96
Training loss: 0.035954072046473884 | Validation loss: 0.04650187672099523
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 97
Training loss: 0.03383453256272412 | Validation loss: 0.04721131607105857
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 98
Training loss: 0.032827389764524186 | Validation loss: 0.042917570001200625
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 99
Training loss: 0.03113685309872237 | Validation loss: 0.04149719017247359
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257]
------------------------------
Epoch: 100
Training loss: 0.030135778122429076 | Validation loss: 0.03988342725655489
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 101
Training loss: 0.03010753776944248 | Validation loss: 0.04145811442612556
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 102
Training loss: 0.03123408529165346 | Validation loss: 0.04319112502822751
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 103
Training loss: 0.032564907242898525 | Validation loss: 0.044689477470360305
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 104
Training loss: 0.033901535042541384 | Validation loss: 0.04784849746838996
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 105
Training loss: 0.03641212473511402 | Validation loss: 0.051434298160306195
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 106
Training loss: 0.03419514418041418 | Validation loss: 0.045509515918399156
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 107
Training loss: 0.0329815001140626 | Validation loss: 0.046876753043187294
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 108
Training loss: 0.031264307952800094 | Validation loss: 0.043929761667784895
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 109
Training loss: 0.030383295491631684 | Validation loss: 0.04097266080217403
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343]
------------------------------
Epoch: 110
Training loss: 0.029282903673468964 | Validation loss: 0.0398062279840049
Validation loss (ends of cycles): [0.16695564 0.05897323 0.05198566 0.04862014 0.04640055 0.04472876
 0.04301842 0.04274107 0.04140717 0.04043257 0.03988343 0.03980623]
Early stopping!
--------------------------------------------------------------------------------
Seed: 10 | Size: 30000
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.1400897194288279 | Validation loss: 0.12329615446574547
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 1
Training loss: 0.09554748258210327 | Validation loss: 0.08580511917962748
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 2
Training loss: 0.08333759619609306 | Validation loss: 0.0800603346351315
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 3
Training loss: 0.07792517879585686 | Validation loss: 0.07561177874312681
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 4
Training loss: 0.07436479456526668 | Validation loss: 0.07901463929344626
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 5
Training loss: 0.07109708578668927 | Validation loss: 0.06837146382998018
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 6
Training loss: 0.06542817449785376 | Validation loss: 0.0787474691429559
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 7
Training loss: 0.061334840936194124 | Validation loss: 0.05965573042631149
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 8
Training loss: 0.057118404958103046 | Validation loss: 0.0656482653144528
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 9
Training loss: 0.05406631466344391 | Validation loss: 0.05449213265057872
Validation loss (ends of cycles): [0.12329615]
------------------------------
Epoch: 10
Training loss: 0.05106979912931198 | Validation loss: 0.052335235345013
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 11
Training loss: 0.052069277808952485 | Validation loss: 0.05276231921332724
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 12
Training loss: 0.05316668891494996 | Validation loss: 0.05438405183308265
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 13
Training loss: 0.054588352700107194 | Validation loss: 0.05557882632402813
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 14
Training loss: 0.05497209098386137 | Validation loss: 0.05784036999239641
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 15
Training loss: 0.05595910447208505 | Validation loss: 0.06857345152427169
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 16
Training loss: 0.05293017792956609 | Validation loss: 0.07677049303756041
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 17
Training loss: 0.05036535177526898 | Validation loss: 0.054857921183985824
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 18
Training loss: 0.0480112096307015 | Validation loss: 0.04789778322857969
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 19
Training loss: 0.045682432433884396 | Validation loss: 0.04683302770204404
Validation loss (ends of cycles): [0.12329615 0.05233524]
------------------------------
Epoch: 20
Training loss: 0.04365749700289023 | Validation loss: 0.04525614475064418
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 21
Training loss: 0.04457468941905781 | Validation loss: 0.046083587692940936
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 22
Training loss: 0.04552650913155001 | Validation loss: 0.045803798384526195
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 23
Training loss: 0.04733393179546846 | Validation loss: 0.04765025776098756
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 24
Training loss: 0.048526976326186404 | Validation loss: 0.05163861622705179
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 25
Training loss: 0.04971253661144721 | Validation loss: 0.06940157834221335
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 26
Training loss: 0.047659809549192064 | Validation loss: 0.049176037705996455
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 27
Training loss: 0.04544191577006131 | Validation loss: 0.047405094828675776
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 28
Training loss: 0.04377149401943346 | Validation loss: 0.04654841545750113
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 29
Training loss: 0.041435345920341975 | Validation loss: 0.04301520901567796
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614]
------------------------------
Epoch: 30
Training loss: 0.039447522790808424 | Validation loss: 0.041631047142779126
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 31
Training loss: 0.04063450964412799 | Validation loss: 0.04336159229278565
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 32
Training loss: 0.04200547775253653 | Validation loss: 0.04426659119918066
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 33
Training loss: 0.04342988368711973 | Validation loss: 0.044159990264212384
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 34
Training loss: 0.04422340278611764 | Validation loss: 0.04507376104593277
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 35
Training loss: 0.04580574428760692 | Validation loss: 0.051384791403132325
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 36
Training loss: 0.04419933799531703 | Validation loss: 0.04598492960281232
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 37
Training loss: 0.04210811722165856 | Validation loss: 0.043686308317324694
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 38
Training loss: 0.04024575349727744 | Validation loss: 0.043796195002163155
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 39
Training loss: 0.038407098160027284 | Validation loss: 0.04090072779971011
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105]
------------------------------
Epoch: 40
Training loss: 0.036603522439193174 | Validation loss: 0.039455434132148234
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 41
Training loss: 0.03742256851278638 | Validation loss: 0.04058602978201473
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 42
Training loss: 0.03843546623829752 | Validation loss: 0.041917330859338534
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 43
Training loss: 0.039746915991418066 | Validation loss: 0.04393004884614664
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 44
Training loss: 0.04127714485542751 | Validation loss: 0.044884422421455385
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 45
Training loss: 0.0428990826157755 | Validation loss: 0.04536734256235992
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 46
Training loss: 0.04116344682284092 | Validation loss: 0.04351957151118447
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 47
Training loss: 0.039308764099290495 | Validation loss: 0.041029254938749704
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 48
Training loss: 0.03747508091067797 | Validation loss: 0.04191207048647544
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 49
Training loss: 0.035598073072584446 | Validation loss: 0.03979100354892366
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543]
------------------------------
Epoch: 50
Training loss: 0.03438328353835172 | Validation loss: 0.037895834172034966
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 51
Training loss: 0.0351846847768971 | Validation loss: 0.03996862692429739
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 52
Training loss: 0.03598356681973919 | Validation loss: 0.0409109014141209
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 53
Training loss: 0.03732215452351068 | Validation loss: 0.042976783128345714
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 54
Training loss: 0.039110059012952995 | Validation loss: 0.044927890949389516
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 55
Training loss: 0.04038672065852504 | Validation loss: 0.04456539171583512
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 56
Training loss: 0.03914046656757005 | Validation loss: 0.04287377705468851
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 57
Training loss: 0.037129987647609886 | Validation loss: 0.040416243420365976
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 58
Training loss: 0.03537750957705277 | Validation loss: 0.03983505024191211
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 59
Training loss: 0.03378118044378138 | Validation loss: 0.03855334082070519
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583]
------------------------------
Epoch: 60
Training loss: 0.03227280559949577 | Validation loss: 0.03684680106885293
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 61
Training loss: 0.03293099604963668 | Validation loss: 0.03870913982391357
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 62
Training loss: 0.03440849116121076 | Validation loss: 0.03925530374707545
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 63
Training loss: 0.03551614041636257 | Validation loss: 0.04271749407052994
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 64
Training loss: 0.03671016464451034 | Validation loss: 0.044847844102803396
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 65
Training loss: 0.038699115091003475 | Validation loss: 0.04217615879195578
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 66
Training loss: 0.036989314980363765 | Validation loss: 0.04112030182252912
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 67
Training loss: 0.035253367830361974 | Validation loss: 0.03938967145102865
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 68
Training loss: 0.033740292163565756 | Validation loss: 0.0396494365461609
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 69
Training loss: 0.03221185418017405 | Validation loss: 0.03731777299852932
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468 ]
------------------------------
Epoch: 70
Training loss: 0.030991236463581262 | Validation loss: 0.03593932203948498
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 71
Training loss: 0.03149883268572586 | Validation loss: 0.037519738275338624
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 72
Training loss: 0.03258418612868378 | Validation loss: 0.03948524081531693
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 73
Training loss: 0.033819790642806574 | Validation loss: 0.040817394786897825
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 74
Training loss: 0.0349594693140764 | Validation loss: 0.038657077379962976
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 75
Training loss: 0.037001777448887496 | Validation loss: 0.043351417955230266
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 76
Training loss: 0.03563734809132783 | Validation loss: 0.04313999725615277
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 77
Training loss: 0.03373342292799957 | Validation loss: 0.04140989732216386
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 78
Training loss: 0.031958367353256203 | Validation loss: 0.03882313241415164
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 79
Training loss: 0.03071572730121644 | Validation loss: 0.038228221697842374
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932]
------------------------------
Epoch: 80
Training loss: 0.02992848748292186 | Validation loss: 0.03572125563069301
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 81
Training loss: 0.030003602803978874 | Validation loss: 0.03800442082916989
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 82
Training loss: 0.030959714718751218 | Validation loss: 0.0395452414146241
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 83
Training loss: 0.03212108367833456 | Validation loss: 0.043741495951133615
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 84
Training loss: 0.03381063934832223 | Validation loss: 0.04081341168459724
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 85
Training loss: 0.035855333057330234 | Validation loss: 0.041724231225602766
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 86
Training loss: 0.03387873944654865 | Validation loss: 0.03887862777885269
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 87
Training loss: 0.03237174124898095 | Validation loss: 0.039358641645487615
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 88
Training loss: 0.030933335009276083 | Validation loss: 0.03714800371843226
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 89
Training loss: 0.029362504421978405 | Validation loss: 0.03726502972490647
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126]
------------------------------
Epoch: 90
Training loss: 0.02861379960943994 | Validation loss: 0.03534455056137899
Validation loss (ends of cycles): [0.12329615 0.05233524 0.04525614 0.04163105 0.03945543 0.03789583
 0.0368468  0.03593932 0.03572126 0.03534455]
Early stopping!
--------------------------------------------------------------------------------
Seed: 10 | Size: 40132
--------------------------------------------------------------------------------
------------------------------
Epoch: 0
Training loss: 0.22611369123667713 | Validation loss: 0.17660081768985345
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 1
Training loss: 0.10115880248188151 | Validation loss: 0.08647104853813627
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 2
Training loss: 0.0806360789464684 | Validation loss: 0.07522331774894116
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 3
Training loss: 0.07291009324221454 | Validation loss: 0.06868328935409014
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 4
Training loss: 0.06777285844982257 | Validation loss: 0.060847439992744314
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 5
Training loss: 0.06530867708575597 | Validation loss: 0.05860075982600714
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 6
Training loss: 0.0605629275820592 | Validation loss: 0.060367112501268895
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 7
Training loss: 0.056550232517555005 | Validation loss: 0.0516667994546943
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 8
Training loss: 0.05290380347424781 | Validation loss: 0.05032209189921881
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 9
Training loss: 0.04968396053414821 | Validation loss: 0.046576345174581604
Validation loss (ends of cycles): [0.17660082]
------------------------------
Epoch: 10
Training loss: 0.04691924053559622 | Validation loss: 0.04340929457241983
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 11
Training loss: 0.04784105510054904 | Validation loss: 0.04530351345254257
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 12
Training loss: 0.04946117783808626 | Validation loss: 0.04510788741496812
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 13
Training loss: 0.05063574022694132 | Validation loss: 0.0505708995052671
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 14
Training loss: 0.05166313559271571 | Validation loss: 0.05817923330561777
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 15
Training loss: 0.05248750614801671 | Validation loss: 0.0493360729311156
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 16
Training loss: 0.049662602678542646 | Validation loss: 0.05467164823043663
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 17
Training loss: 0.04728653871703629 | Validation loss: 0.052291345279828635
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 18
Training loss: 0.04459606154132488 | Validation loss: 0.05458670186983273
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 19
Training loss: 0.04198971974542438 | Validation loss: 0.03901625990010468
Validation loss (ends of cycles): [0.17660082 0.04340929]
------------------------------
Epoch: 20
Training loss: 0.03987340637625701 | Validation loss: 0.03802946162105134
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 21
Training loss: 0.04096880447445804 | Validation loss: 0.038806912893083245
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 22
Training loss: 0.04220846461638163 | Validation loss: 0.0440736518167289
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 23
Training loss: 0.04363556982084053 | Validation loss: 0.04022814183438246
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 24
Training loss: 0.04542448289974732 | Validation loss: 0.046775908052789424
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 25
Training loss: 0.04642166053568284 | Validation loss: 0.04878175716880148
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 26
Training loss: 0.044440456977715405 | Validation loss: 0.04138228518113626
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 27
Training loss: 0.041865324622614115 | Validation loss: 0.04312394201689589
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 28
Training loss: 0.039823972675986 | Validation loss: 0.03708740910597607
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 29
Training loss: 0.03766148964009123 | Validation loss: 0.036518600266591636
Validation loss (ends of cycles): [0.17660082 0.04340929 0.03802946]
------------------------------
Epoch: 30
[output truncated]
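The log above shows the pattern used for every run: the validation loss is printed each epoch, but only the values at the end of each 10-epoch learning-rate cycle are appended to the array that drives early stopping. A minimal sketch of that bookkeeping follows; it is an illustrative reconstruction, not the mirzai implementation, and `train_one_epoch`, `evaluate`, and `should_stop` are hypothetical stand-ins (the exact stopping criterion is not visible in the output).

def fit_with_cycle_logging(model, train_dl, valid_dl, max_epochs=200, cycle_length=10):
    "Sketch (assumed, simplified) of the epoch loop behind the log above."
    cycle_val_losses = []
    for epoch in range(max_epochs):
        train_loss = train_one_epoch(model, train_dl)   # hypothetical helper
        val_loss = evaluate(model, valid_dl)            # hypothetical helper
        print('-' * 30)
        print(f'Epoch: {epoch}')
        print(f'Training loss: {train_loss} | Validation loss: {val_loss}')
        # Record the validation loss only at the end of each 10-epoch cycle
        if epoch % cycle_length == 0:
            cycle_val_losses.append(val_loss)
        print(f'Validation loss (ends of cycles): {cycle_val_losses}')
        if should_stop(cycle_val_losses):               # criterion not shown in the output
            print('Early stopping!')
            break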
import glob
import pickle
from pathlib import Path

import pandas as pd

def load_all(src_dir):
    "Load every pickled performance dump found in `src_dir`."
    dumps = []
    for file in glob.glob(str(src_dir/'*.pickle')):
        with open(file, 'rb') as f:
            dumps.append(pickle.load(f))
    return dumps

src_dir = Path('/content/drive/MyDrive/research/predict-k-mirs-dl/dumps/cnn/learning_curve')
dumps = load_all(src_dir)

# Stack all dumps (one per seed) into a single DataFrame, then compute the
# mean and standard deviation of the test score and of the number of epochs
# to early stopping, across seeds, for each training set size
df = pd.concat([pd.DataFrame(perf) for perf in dumps])
grps = df.groupby(['n_samples']).agg({'test_score': ['mean', 'std'], 'n_epochs': ['mean', 'std']})
grps.head()
           test_score            n_epochs
                 mean       std        mean        std
n_samples
500          0.413549  0.184284  111.000000  47.749346
1000         0.453063  0.073105   79.333333  17.224014
2000         0.534975  0.051113  121.000000  33.466401
5000         0.607885  0.039500   87.666667  23.380904
10000        0.677188  0.020144  101.000000  28.284271
# Flatten the MultiIndex columns, e.g. ('test_score', 'mean') -> 'test_score_mean'
grps.columns = grps.columns.map('_'.join)

# Learning curve: mean test score vs. training set size, on a log-scaled x axis
grps.reset_index().plot(x='n_samples', y='test_score_mean', logx=True);
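Since the per-size standard deviations are also available, the learning curve can be drawn with an uncertainty band around the mean. This is not part of the original notebook; it is a minimal sketch assuming the flattened column names produced by the cell above.

import matplotlib.pyplot as plt

lc = grps.reset_index()

fig, ax = plt.subplots()
ax.plot(lc['n_samples'], lc['test_score_mean'], marker='o')
# Shade mean +/- one standard deviation across seeds
ax.fill_between(lc['n_samples'],
                lc['test_score_mean'] - lc['test_score_std'],
                lc['test_score_mean'] + lc['test_score_std'],
                alpha=0.2)
ax.set_xscale('log')
ax.set_xlabel('Number of training samples')
ax.set_ylabel('Test score (mean ± std over seeds)')
plt.show()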

Save results

# Persist the aggregated learning-curve summary for reuse in later analyses
dest_dir = Path('/content/drive/MyDrive/research/predict-k-mirs-dl/dumps')

with open(dest_dir/'cnn_test_perf_vs_n_samples.pickle', 'wb') as f:
    pickle.dump(grps, f)
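The saved summary can later be read back from the same location; a minimal sketch, assuming the `dest_dir` defined above:

# Reload the aggregated results saved above
with open(dest_dir/'cnn_test_perf_vs_n_samples.pickle', 'rb') as f:
    grps_loaded = pickle.load(f)

grps_loaded.head()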