import pandas as pd
import matplotlib.pyplot as plt
from autots import AutoTS
import warnings
warnings.simplefilter(action='ignore')
# Record version of key libraries
from importlib.metadata import version
print('autots==%s' % version('autots'))
autots==0.3.2
# Read local data
uni_data_df = pd.read_csv("../data/air_passengers.csv")
multi_data_df = pd.read_csv("../data/multi_ts.csv", index_col=0)
# Rename columns so that the AutoTS .fit() call can recognise the date, value and ID fields
uni_data_df.columns = ['time', 'value']
multi_data_df.columns = ['time', 'v1', 'v2']
# Add a column to identify series ID
uni_data_df['series_id'] = 'grp1'
# Convert multivariate df to use datetime index
multi_data_df.index = pd.to_datetime(multi_data_df['time'])
multi_data_df.drop(columns=['time'], inplace=True)
print(uni_data_df.head())
print(multi_data_df.head())
         time  value series_id
0  1949-01-01    112      grp1
1  1949-02-01    118      grp1
2  1949-03-01    132      grp1
3  1949-04-01    129      grp1
4  1949-05-01    121      grp1

               v1    v2
time
2017-03-12 -0.109  53.8
2017-03-13  0.000  53.6
2017-03-14  0.178  53.5
2017-03-15  0.339  53.5
2017-03-16  0.373  53.4
# Visualise data
uni_data_df.set_index('time').plot()
plt.show()
multi_data_df.plot()
plt.show()
Supported models include UnivariateRegression, GLS and many others; the full set of named model groupings is defined in autots.models.model_list.
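To see what a grouping such as 'fast' actually contains, the model_lists dictionary referenced below can be inspected directly (a minimal sketch, assuming the dictionary layout of the installed AutoTS version):
# List the named model groupings and the models inside the 'fast' group
from autots.models.model_list import model_lists
print(model_lists.keys())
print(model_lists['fast'])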
# Split training and testing data
forecast_length = 12
uni_train_df = uni_data_df.iloc[:-forecast_length,:]
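If the held-out months are also wanted for a later comparison against the forecast, the complementary slice can be kept as well (a small optional addition, not required by AutoTS itself):
# Keep the last forecast_length rows as a holdout set for visual comparison later
uni_test_df = uni_data_df.iloc[-forecast_length:, :]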
# Set up automated model testing
# NOTE - see model list from here
# from autots.models.model_list import model_lists
uni_model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
max_generations=10, # number of genetic algorithm generations to run
ensemble='all',
model_list='fast',
transformer_list='fast',
num_validations=3,
validation_method="backwards",
random_seed=6,
n_jobs='auto'
)
# Run the automated model search and evaluation
uni_model = uni_model.fit(
uni_train_df,
date_col='time',
value_col='value',
id_col='series_id',
)
Inferred frequency is: MS
Model Number: 1 with model AverageValueNaive in generation 0 of 10
Model Number: 2 with model AverageValueNaive in generation 0 of 10
Model Number: 3 with model AverageValueNaive in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 3: AverageValueNaive
Model Number: 4 with model ETS in generation 0 of 10
Model Number: 5 with model ETS in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 5: ETS
Model Number: 6 with model GLM in generation 0 of 10
Model Number: 7 with model GLM in generation 0 of 10
Model Number: 8 with model GLS in generation 0 of 10
Model Number: 9 with model GLS in generation 0 of 10
Model Number: 10 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 10: GluonTS
Model Number: 11 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 11: GluonTS
Model Number: 12 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 12: GluonTS
Model Number: 13 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 13: GluonTS
Model Number: 14 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 14: GluonTS
Model Number: 15 with model GluonTS in generation 0 of 10
Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 15: GluonTS
Model Number: 16 with model LastValueNaive in generation 0 of 10
Model Number: 17 with model LastValueNaive in generation 0 of 10
Model Number: 18 with model LastValueNaive in generation 0 of 10
Model Number: 19 with model LastValueNaive in generation 0 of 10
Model Number: 20 with model SeasonalNaive in generation 0 of 10
Model Number: 21 with model SeasonalNaive in generation 0 of 10
Model Number: 22 with model SeasonalNaive in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 22: SeasonalNaive
Model Number: 23 with model SeasonalNaive in generation 0 of 10
Model Number: 24 with model VAR in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VAR') in model 24: VAR
Model Number: 25 with model VAR in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VAR') in model 25: VAR
Model Number: 26 with model VAR in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VAR') in model 26: VAR
Model Number: 27 with model VECM in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 27: VECM
Model Number: 28 with model VECM in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VECM') in model 28: VECM
Model Number: 29 with model VECM in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VECM') in model 29: VECM
Model Number: 30 with model VECM in generation 0 of 10
Template Eval Error: ValueError('Only gave one variable to VECM') in model 30: VECM
Model Number: 31 with model WindowRegression in generation 0 of 10
Model Number: 32 with model ZeroesNaive in generation 0 of 10
Model Number: 33 with model GLM in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 33: GLM
Model Number: 34 with model GLS in generation 0 of 10
Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 34: GLS
Model Number: 35 with model WindowRegression in generation 0 of 10
[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000029 seconds. You can set `force_col_wise=true` to remove the overhead.
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
... [repeated LightGBM warnings truncated] ...
[Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000022 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No 
further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive 
gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000028 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best 
gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] 
[Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000031 seconds. You can set `force_col_wise=true` to remove the overhead. 
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No 
further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive 
gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000022 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best 
gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] 
Model Number: 36 with model GluonTS in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 36: GluonTS Model Number: 37 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 37: GluonTS Model Number: 38 with model SeasonalNaive in generation 0 of 10 Model Number: 39 with model VAR in generation 0 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 39: VAR Model Number: 40 with model ZeroesNaive in generation 0 of 10 Model Number: 41 with model GluonTS in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 41: GluonTS Model Number: 42 with model AverageValueNaive in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 42: AverageValueNaive Model Number: 43 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 43: GluonTS Model Number: 44 with model ZeroesNaive in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 44: ZeroesNaive Model Number: 45 with model GLM in generation 0 of 10 Template Eval Error: ValueError('NaN, inf or invalid value detected in weights, estimation infeasible.') in model 45: GLM Model Number: 46 with model VECM in generation 0 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 46: VECM Model Number: 47 with model VECM in generation 0 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 47: VECM Model Number: 48 with model WindowRegression in generation 0 of 10 Model Number: 49 with model WindowRegression in generation 0 of 10 Model Number: 50 with model GLM in generation 0 of 10 Model Number: 51 with model SeasonalNaive in generation 0 of 10 Model Number: 52 with model VAR in generation 0 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 52: VAR Model Number: 53 with model ZeroesNaive in generation 0 of 10 Model Number: 54 with model SeasonalNaive in generation 0 of 10 Template Eval Error: ValueError('zero-size array to reduction operation maximum which has no identity') in model 54: SeasonalNaive Model Number: 55 with model SeasonalNaive in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series,
length must be 1: given 120') in model 55: SeasonalNaive Model Number: 56 with model VAR in generation 0 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 56: VAR Model Number: 57 with model LastValueNaive in generation 0 of 10 Model Number: 58 with model GLS in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 58: GLS Model Number: 59 with model SeasonalNaive in generation 0 of 10 Model Number: 60 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 60: ComponentAnalysis Model Number: 61 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 61: GluonTS Model Number: 62 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 62: ComponentAnalysis Model Number: 63 with model ZeroesNaive in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 63: ZeroesNaive Model Number: 64 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 64: ComponentAnalysis Model Number: 65 with model SeasonalNaive in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 65: SeasonalNaive Model Number: 66 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 66: ComponentAnalysis Model Number: 67 with model VECM in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 67: VECM Model Number: 68 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 68: ComponentAnalysis Model Number: 69 with model GLS in generation 0 of 10 Model Number: 70 with model AverageValueNaive in generation 0 of 10 Model Number: 71 with model WindowRegression in generation 0 of 10 Model Number: 72 with model GluonTS in generation 0 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 72: GluonTS New Generation: 1 of 10 Model Number: 73 with model WindowRegression in generation 1 of 10 [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000031 seconds. You can set `force_col_wise=true` to remove the overhead. 
[... repeated LightGBM warnings truncated ...] Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 73: WindowRegression Model Number: 74 with model WindowRegression in generation 1 of 10 Model Number: 75 with model WindowRegression in generation 1 of 10 Model Number: 76 with model WindowRegression in generation 1 of 10 Model Number: 77 with model SeasonalNaive in generation 1 of 10 Model Number: 78 with model SeasonalNaive in generation 1 of 10 Model Number: 79 with model SeasonalNaive in generation 1 of 10 Model Number: 80 with model SeasonalNaive in generation 1 of 10 Model Number: 81 with model GLS in generation 1 of 10 Model Number: 82 with model GLS in generation 1 of 10 Model Number: 83 with model GLS in generation 1 of 10 Model Number: 84 with model GLM in generation 1 of 10 Model Number: 85 with model GLM in generation 1 of 10 Model Number: 86 with model GLM in generation 1 of 10 Template Eval Error: ValueError('NaN, inf or invalid value detected in weights, estimation infeasible.') in model 86: GLM Model Number: 87 with model GLM in generation 1 of 10 Model Number: 88 with model AverageValueNaive in generation 1 of 10 Model Number: 89 with model AverageValueNaive in generation 1 of 10 Model Number: 90 with model ETS in generation 1 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 90: ETS Model Number: 91 with model ETS in generation 1 of 10 Model Number: 92 with model ETS in generation 1 of 10 Model Number: 93 with model ETS in generation 1 of 10 Model Number: 94 with model ZeroesNaive in generation 1 of 10 Model Number: 95 with model ZeroesNaive in generation 1 of 10 Model Number: 96 with model ZeroesNaive in generation 1 of 10 Model Number: 97 with model LastValueNaive in generation 1 of 10 Model Number: 98 with model LastValueNaive in generation 1 of 10 Model Number: 99 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 99: GluonTS Model Number: 100 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 100: GluonTS Model Number: 101 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 101: GluonTS Model Number: 102 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 102: GluonTS Model Number: 103 with model VAR in generation 1 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 103: VAR Model Number: 104 with model VAR in generation 1 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 104: VAR Model Number: 105 with model VAR in generation 1 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 105: VAR Model Number: 106 with model VAR in generation 1 of 10 Template Eval Error:
ValueError('Only gave one variable to VAR') in model 106: VAR Model Number: 107 with model VECM in generation 1 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 107: VECM Model Number: 108 with model VECM in generation 1 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 108: VECM Model Number: 109 with model VECM in generation 1 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 109: VECM Model Number: 110 with model VECM in generation 1 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 110: VECM Model Number: 111 with model ComponentAnalysis in generation 1 of 10 Model Number: 112 with model ComponentAnalysis in generation 1 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 112: ComponentAnalysis Model Number: 113 with model ComponentAnalysis in generation 1 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 113: ComponentAnalysis New Generation: 2 of 10 Model Number: 114 with model WindowRegression in generation 2 of 10 Model Number: 115 with model WindowRegression in generation 2 of 10 Model Number: 116 with model WindowRegression in generation 2 of 10 Model Number: 117 with model WindowRegression in generation 2 of 10 Model Number: 118 with model SeasonalNaive in generation 2 of 10 Model Number: 119 with model SeasonalNaive in generation 2 of 10 Model Number: 120 with model SeasonalNaive in generation 2 of 10 Model Number: 121 with model SeasonalNaive in generation 2 of 10 Model Number: 122 with model GLS in generation 2 of 10 Model Number: 123 with model GLS in generation 2 of 10 Model Number: 124 with model GLS in generation 2 of 10 Model Number: 125 with model GLM in generation 2 of 10 Model Number: 126 with model GLM in generation 2 of 10 Model Number: 127 with model GLM in generation 2 of 10 Model Number: 128 with model GLM in generation 2 of 10 Model Number: 129 with model LastValueNaive in generation 2 of 10 Model Number: 130 with model LastValueNaive in generation 2 of 10 Model Number: 131 with model LastValueNaive in generation 2 of 10 Model Number: 132 with model ETS in generation 2 of 10 Model Number: 133 with model ETS in generation 2 of 10 Model Number: 134 with model ETS in generation 2 of 10 Model Number: 135 with model ETS in generation 2 of 10 Model Number: 136 with model AverageValueNaive in generation 2 of 10 Model Number: 137 with model AverageValueNaive in generation 2 of 10 Model Number: 138 with model ZeroesNaive in generation 2 of 10 Model Number: 139 with model ZeroesNaive in generation 2 of 10 Model Number: 140 with model ZeroesNaive in generation 2 of 10 Model Number: 141 with model ComponentAnalysis in generation 2 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 141: ComponentAnalysis Model Number: 142 with model ComponentAnalysis in generation 2 of 10
INFO:fbprophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Template Eval Error: RuntimeError('dictionary changed size during iteration') in model 142: ComponentAnalysis Model Number: 143 with model ComponentAnalysis in generation 2 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 143: ComponentAnalysis Model Number: 144 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 144: GluonTS Model Number: 145 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 145: GluonTS Model Number: 146 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 146: GluonTS Model Number: 147 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 147: GluonTS Model Number: 148 with model VAR in generation 2 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 148: VAR Model Number: 149 with model VAR in generation 2 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 149: VAR Model Number: 150 with model VAR in generation 2 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 150: VAR Model Number: 151 with model VAR in generation 2 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 151: VAR Model Number: 152 with model VECM in generation 2 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 152: VECM Model Number: 153 with model VECM in generation 2 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 153: VECM Model Number: 154 with model VECM in generation 2 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 154: VECM Model Number: 155 with model VECM in generation 2 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 155: VECM New Generation: 3 of 10 Model Number: 156 with model WindowRegression in generation 3 of 10 Model Number: 157 with model WindowRegression in generation 3 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 157: WindowRegression Model Number: 158 with model WindowRegression in generation 3 of 10 Model Number: 159 with model WindowRegression in generation 3 of 10 Model Number: 160 with model SeasonalNaive in generation 3 of 10 Model Number: 161 with model SeasonalNaive in generation 3 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 161: SeasonalNaive Model Number: 162 with model SeasonalNaive in generation 3 of 10 Model Number: 163 with model SeasonalNaive in generation 3 of 10 Model Number: 164 with model GLM in generation 3 of 10 Model Number: 165 with model GLM in generation 3 of 10 Model Number: 166 with model GLM in generation 3 of 10 Model Number: 167 with model AverageValueNaive in generation 3 of 10 Model Number: 168 with model AverageValueNaive in generation 3 of 10 Model Number: 169 with model AverageValueNaive in generation 3 of 10 Model Number: 170 with model ETS in generation 3 of 10 Model Number: 171 with 
model ETS in generation 3 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 171: ETS Model Number: 172 with model ETS in generation 3 of 10 Model Number: 173 with model ETS in generation 3 of 10 Model Number: 174 with model LastValueNaive in generation 3 of 10 Model Number: 175 with model LastValueNaive in generation 3 of 10 Model Number: 176 with model LastValueNaive in generation 3 of 10 Model Number: 177 with model GLS in generation 3 of 10 Model Number: 178 with model GLS in generation 3 of 10 Model Number: 179 with model GLS in generation 3 of 10 Model Number: 180 with model ZeroesNaive in generation 3 of 10 Model Number: 181 with model ZeroesNaive in generation 3 of 10 Model Number: 182 with model ZeroesNaive in generation 3 of 10 Model Number: 183 with model ComponentAnalysis in generation 3 of 10 Model Number: 184 with model ComponentAnalysis in generation 3 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 184: ComponentAnalysis Model Number: 185 with model ComponentAnalysis in generation 3 of 10 Model Number: 186 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 186: GluonTS Model Number: 187 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 187: GluonTS Model Number: 188 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 188: GluonTS Model Number: 189 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 189: GluonTS Model Number: 190 with model VAR in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 190: VAR Model Number: 191 with model VAR in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 191: VAR Model Number: 192 with model VAR in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 192: VAR Model Number: 193 with model VAR in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 193: VAR Model Number: 194 with model VECM in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 194: VECM Model Number: 195 with model VECM in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 195: VECM Model Number: 196 with model VECM in generation 3 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 196: VECM Model Number: 197 with model VECM in generation 3 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 197: VECM New Generation: 4 of 10 Model Number: 198 with model WindowRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 198: WindowRegression Model Number: 199 with model WindowRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 199: WindowRegression Model Number: 200 with model WindowRegression 
in generation 4 of 10 Model Number: 201 with model WindowRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 201: WindowRegression Model Number: 202 with model SeasonalNaive in generation 4 of 10 Model Number: 203 with model SeasonalNaive in generation 4 of 10 Model Number: 204 with model SeasonalNaive in generation 4 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 204: SeasonalNaive Model Number: 205 with model SeasonalNaive in generation 4 of 10 Model Number: 206 with model GLM in generation 4 of 10 Model Number: 207 with model GLM in generation 4 of 10 Model Number: 208 with model GLM in generation 4 of 10 Model Number: 209 with model GLM in generation 4 of 10 Model Number: 210 with model GLS in generation 4 of 10 Model Number: 211 with model GLS in generation 4 of 10 Model Number: 212 with model GLS in generation 4 of 10 Model Number: 213 with model AverageValueNaive in generation 4 of 10 Model Number: 214 with model AverageValueNaive in generation 4 of 10 Model Number: 215 with model AverageValueNaive in generation 4 of 10 Model Number: 216 with model ETS in generation 4 of 10 Model Number: 217 with model ETS in generation 4 of 10 Model Number: 218 with model ETS in generation 4 of 10 Model Number: 219 with model ETS in generation 4 of 10 Model Number: 220 with model ZeroesNaive in generation 4 of 10 Model Number: 221 with model ZeroesNaive in generation 4 of 10 Model Number: 222 with model ZeroesNaive in generation 4 of 10 Model Number: 223 with model LastValueNaive in generation 4 of 10 Model Number: 224 with model ComponentAnalysis in generation 4 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 224: ComponentAnalysis Model Number: 225 with model ComponentAnalysis in generation 4 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 225: ComponentAnalysis Model Number: 226 with model ComponentAnalysis in generation 4 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 226: ComponentAnalysis Model Number: 227 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 227: GluonTS Model Number: 228 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 228: GluonTS Model Number: 229 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 229: GluonTS Model Number: 230 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 230: GluonTS Model Number: 231 with model VAR in generation 4 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 231: VAR Model Number: 232 with model VAR in generation 4 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 232: VAR Model Number: 233 with model VAR in generation 4 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 233: VAR Model Number: 234 with model 
VAR in generation 4 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 234: VAR Model Number: 235 with model VECM in generation 4 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 235: VECM Model Number: 236 with model VECM in generation 4 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 236: VECM Model Number: 237 with model VECM in generation 4 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 237: VECM Model Number: 238 with model VECM in generation 4 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 238: VECM New Generation: 5 of 10 Model Number: 239 with model WindowRegression in generation 5 of 10 Model Number: 240 with model WindowRegression in generation 5 of 10 Model Number: 241 with model WindowRegression in generation 5 of 10 Model Number: 242 with model WindowRegression in generation 5 of 10 Model Number: 243 with model AverageValueNaive in generation 5 of 10 Model Number: 244 with model AverageValueNaive in generation 5 of 10 Model Number: 245 with model SeasonalNaive in generation 5 of 10 Model Number: 246 with model SeasonalNaive in generation 5 of 10 Model Number: 247 with model SeasonalNaive in generation 5 of 10 Model Number: 248 with model SeasonalNaive in generation 5 of 10 Model Number: 249 with model GLM in generation 5 of 10 Model Number: 250 with model GLM in generation 5 of 10 Model Number: 251 with model GLM in generation 5 of 10 Model Number: 252 with model GLM in generation 5 of 10 Model Number: 253 with model GLS in generation 5 of 10 Model Number: 254 with model GLS in generation 5 of 10 Model Number: 255 with model ETS in generation 5 of 10 Model Number: 256 with model ETS in generation 5 of 10 Model Number: 257 with model ETS in generation 5 of 10 Model Number: 258 with model ETS in generation 5 of 10 Model Number: 259 with model ZeroesNaive in generation 5 of 10 Model Number: 260 with model ZeroesNaive in generation 5 of 10 Model Number: 261 with model ZeroesNaive in generation 5 of 10 Model Number: 262 with model LastValueNaive in generation 5 of 10 Model Number: 263 with model LastValueNaive in generation 5 of 10 Model Number: 264 with model ComponentAnalysis in generation 5 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 264: ComponentAnalysis Model Number: 265 with model ComponentAnalysis in generation 5 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 265: ComponentAnalysis Model Number: 266 with model ComponentAnalysis in generation 5 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 266: ComponentAnalysis Model Number: 267 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 267: GluonTS Model Number: 268 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 268: GluonTS Model Number: 269 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 269: GluonTS Model Number: 270 with model GluonTS in generation 5 of 10 Template Eval Error: ValueError('Unable to 
coerce to Series, length must be 1: given 120') in model 270: GluonTS Model Number: 271 with model VAR in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 271: VAR Model Number: 272 with model VAR in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 272: VAR Model Number: 273 with model VAR in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 273: VAR Model Number: 274 with model VAR in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 274: VAR Model Number: 275 with model VECM in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 275: VECM Model Number: 276 with model VECM in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 276: VECM Model Number: 277 with model VECM in generation 5 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 277: VECM New Generation: 6 of 10 Model Number: 278 with model WindowRegression in generation 6 of 10 Template Eval Error: ImportError('Tensorflow not available, install with pip install tensorflow.') in model 278: WindowRegression Model Number: 279 with model WindowRegression in generation 6 of 10 Model Number: 280 with model WindowRegression in generation 6 of 10 Model Number: 281 with model WindowRegression in generation 6 of 10 Template Eval Error: ImportError('Tensorflow not available, install with pip install tensorflow.') in model 281: WindowRegression Model Number: 282 with model AverageValueNaive in generation 6 of 10 Model Number: 283 with model AverageValueNaive in generation 6 of 10 Model Number: 284 with model AverageValueNaive in generation 6 of 10 Model Number: 285 with model SeasonalNaive in generation 6 of 10 Model Number: 286 with model SeasonalNaive in generation 6 of 10 Model Number: 287 with model SeasonalNaive in generation 6 of 10 Model Number: 288 with model SeasonalNaive in generation 6 of 10 Model Number: 289 with model GLM in generation 6 of 10 Model Number: 290 with model GLM in generation 6 of 10 Model Number: 291 with model GLS in generation 6 of 10 Model Number: 292 with model GLS in generation 6 of 10 Model Number: 293 with model GLS in generation 6 of 10 Model Number: 294 with model ETS in generation 6 of 10 Model Number: 295 with model ETS in generation 6 of 10 Model Number: 296 with model ETS in generation 6 of 10 Model Number: 297 with model ETS in generation 6 of 10 Model Number: 298 with model ZeroesNaive in generation 6 of 10 Model Number: 299 with model ZeroesNaive in generation 6 of 10 Model Number: 300 with model ZeroesNaive in generation 6 of 10 Model Number: 301 with model LastValueNaive in generation 6 of 10 Model Number: 302 with model LastValueNaive in generation 6 of 10 Model Number: 303 with model LastValueNaive in generation 6 of 10 Model Number: 304 with model ComponentAnalysis in generation 6 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 304: ComponentAnalysis Model Number: 305 with model ComponentAnalysis in generation 6 of 10 Model Number: 306 with model ComponentAnalysis in generation 6 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 306: ComponentAnalysis Model Number: 307 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed 
version is incompatible with AutoTS.') in model 307: GluonTS Model Number: 308 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 308: GluonTS Model Number: 309 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 309: GluonTS Model Number: 310 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 310: GluonTS Model Number: 311 with model VAR in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 311: VAR Model Number: 312 with model VAR in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 312: VAR Model Number: 313 with model VAR in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 313: VAR Model Number: 314 with model VECM in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 314: VECM Model Number: 315 with model VECM in generation 6 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 315: VECM Model Number: 316 with model VECM in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 316: VECM Model Number: 317 with model VECM in generation 6 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 317: VECM New Generation: 7 of 10 Model Number: 318 with model WindowRegression in generation 7 of 10 Model Number: 319 with model WindowRegression in generation 7 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 319: WindowRegression Model Number: 320 with model WindowRegression in generation 7 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 320: WindowRegression Model Number: 321 with model WindowRegression in generation 7 of 10 Model Number: 322 with model AverageValueNaive in generation 7 of 10 Model Number: 323 with model AverageValueNaive in generation 7 of 10 Model Number: 324 with model AverageValueNaive in generation 7 of 10 Model Number: 325 with model SeasonalNaive in generation 7 of 10 Model Number: 326 with model SeasonalNaive in generation 7 of 10 Model Number: 327 with model SeasonalNaive in generation 7 of 10 Model Number: 328 with model SeasonalNaive in generation 7 of 10 Model Number: 329 with model GLM in generation 7 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/genmod/generalized_linear_model.py:293: DomainWarning: The inverse_power link function does not respect the domain of the Gamma family. warnings.warn((f"The {type(family.link).__name__} link function "
Model Number: 330 with model GLM in generation 7 of 10 Model Number: 331 with model GLM in generation 7 of 10 Model Number: 332 with model GLM in generation 7 of 10 Template Eval Error: PerfectSeparationError('Perfect separation detected, results not available') in model 332: GLM Model Number: 333 with model GLS in generation 7 of 10 Model Number: 334 with model GLS in generation 7 of 10 Model Number: 335 with model ComponentAnalysis in generation 7 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 335: ComponentAnalysis Model Number: 336 with model ComponentAnalysis in generation 7 of 10 Model Number: 337 with model ComponentAnalysis in generation 7 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 337: ComponentAnalysis Model Number: 338 with model ETS in generation 7 of 10 Model Number: 339 with model ETS in generation 7 of 10 Model Number: 340 with model ETS in generation 7 of 10 Model Number: 341 with model ZeroesNaive in generation 7 of 10 Model Number: 342 with model ZeroesNaive in generation 7 of 10 Model Number: 343 with model ZeroesNaive in generation 7 of 10 Model Number: 344 with model LastValueNaive in generation 7 of 10 Model Number: 345 with model LastValueNaive in generation 7 of 10 Model Number: 346 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 346: GluonTS Model Number: 347 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 347: GluonTS Model Number: 348 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 348: GluonTS Model Number: 349 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 349: GluonTS Model Number: 350 with model VAR in generation 7 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 350: VAR Model Number: 351 with model VAR in generation 7 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 351: VAR Model Number: 352 with model VAR in generation 7 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 352: VAR Model Number: 353 with model VAR in generation 7 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 353: VAR Model Number: 354 with model VECM in generation 7 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 354: VECM Model Number: 355 with model VECM in generation 7 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 355: VECM Model Number: 356 with model VECM in generation 7 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 356: VECM Model Number: 357 with model VECM in generation 7 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 357: VECM New Generation: 8 of 10 Model Number: 358 with model WindowRegression in generation 8 of 10 Model Number: 359 with model WindowRegression in generation 8 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a 
value too large for dtype('float64').") in model 359: WindowRegression Model Number: 360 with model WindowRegression in generation 8 of 10 Model Number: 361 with model AverageValueNaive in generation 8 of 10 Model Number: 362 with model AverageValueNaive in generation 8 of 10 Model Number: 363 with model SeasonalNaive in generation 8 of 10 Model Number: 364 with model SeasonalNaive in generation 8 of 10 Model Number: 365 with model SeasonalNaive in generation 8 of 10 Model Number: 366 with model SeasonalNaive in generation 8 of 10 Model Number: 367 with model GLM in generation 8 of 10 Model Number: 368 with model GLM in generation 8 of 10 Model Number: 369 with model GLM in generation 8 of 10 Model Number: 370 with model GLM in generation 8 of 10 Model Number: 371 with model GLS in generation 8 of 10 Model Number: 372 with model GLS in generation 8 of 10 Model Number: 373 with model ComponentAnalysis in generation 8 of 10 Model Number: 374 with model ComponentAnalysis in generation 8 of 10 Model Number: 375 with model ComponentAnalysis in generation 8 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 375: ComponentAnalysis Model Number: 376 with model LastValueNaive in generation 8 of 10 Model Number: 377 with model LastValueNaive in generation 8 of 10 Model Number: 378 with model LastValueNaive in generation 8 of 10 Model Number: 379 with model ETS in generation 8 of 10 Model Number: 380 with model ETS in generation 8 of 10 Model Number: 381 with model ETS in generation 8 of 10 Model Number: 382 with model ETS in generation 8 of 10 Model Number: 383 with model ZeroesNaive in generation 8 of 10 Model Number: 384 with model ZeroesNaive in generation 8 of 10 Model Number: 385 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 385: GluonTS Model Number: 386 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 386: GluonTS Model Number: 387 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 387: GluonTS Model Number: 388 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 388: GluonTS Model Number: 389 with model VAR in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 389: VAR Model Number: 390 with model VAR in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 390: VAR Model Number: 391 with model VAR in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 391: VAR Model Number: 392 with model VAR in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 392: VAR Model Number: 393 with model VECM in generation 8 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 393: VECM Model Number: 394 with model VECM in generation 8 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 394: VECM Model Number: 395 with model VECM in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 
395: VECM Model Number: 396 with model VECM in generation 8 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 396: VECM New Generation: 9 of 10 Model Number: 397 with model WindowRegression in generation 9 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 397: WindowRegression Model Number: 398 with model WindowRegression in generation 9 of 10 Model Number: 399 with model WindowRegression in generation 9 of 10 Model Number: 400 with model WindowRegression in generation 9 of 10 Model Number: 401 with model ComponentAnalysis in generation 9 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 401: ComponentAnalysis Model Number: 402 with model ComponentAnalysis in generation 9 of 10 Model Number: 403 with model ComponentAnalysis in generation 9 of 10 Model Number: 404 with model AverageValueNaive in generation 9 of 10 Model Number: 405 with model AverageValueNaive in generation 9 of 10 Model Number: 406 with model AverageValueNaive in generation 9 of 10 Model Number: 407 with model SeasonalNaive in generation 9 of 10 Model Number: 408 with model SeasonalNaive in generation 9 of 10 Model Number: 409 with model SeasonalNaive in generation 9 of 10 Model Number: 410 with model SeasonalNaive in generation 9 of 10 Model Number: 411 with model GLM in generation 9 of 10 Model Number: 412 with model GLM in generation 9 of 10 Model Number: 413 with model GLM in generation 9 of 10 Model Number: 414 with model GLM in generation 9 of 10 Model Number: 415 with model GLS in generation 9 of 10 Model Number: 416 with model GLS in generation 9 of 10 Model Number: 417 with model GLS in generation 9 of 10 Model Number: 418 with model ETS in generation 9 of 10 Model Number: 419 with model ETS in generation 9 of 10 Model Number: 420 with model ETS in generation 9 of 10 Model Number: 421 with model LastValueNaive in generation 9 of 10 Model Number: 422 with model LastValueNaive in generation 9 of 10 Model Number: 423 with model LastValueNaive in generation 9 of 10 Model Number: 424 with model ZeroesNaive in generation 9 of 10 Model Number: 425 with model ZeroesNaive in generation 9 of 10 Model Number: 426 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 426: GluonTS Model Number: 427 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 427: GluonTS Model Number: 428 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 428: GluonTS Model Number: 429 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 429: GluonTS Model Number: 430 with model VAR in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 430: VAR Model Number: 431 with model VAR in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 431: VAR Model Number: 432 with model VAR in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VAR') in model 432: VAR Model Number: 433 with model VAR in generation 9 of 10 Template Eval Error: 
ValueError('Only gave one variable to VAR') in model 433: VAR Model Number: 434 with model VECM in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 434: VECM Model Number: 435 with model VECM in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 435: VECM Model Number: 436 with model VECM in generation 9 of 10 Template Eval Error: ValueError('Only gave one variable to VECM') in model 436: VECM New Generation: 10 of 10 Model Number: 437 with model WindowRegression in generation 10 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 437: WindowRegression Model Number: 438 with model WindowRegression in generation 10 of 10 Model Number: 439 with model WindowRegression in generation 10 of 10 Model Number: 440 with model WindowRegression in generation 10 of 10 Model Number: 441 with model ComponentAnalysis in generation 10 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 441: ComponentAnalysis Model Number: 442 with model ComponentAnalysis in generation 10 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 442: ComponentAnalysis Model Number: 443 with model ComponentAnalysis in generation 10 of 10 Model Number: 444 with model AverageValueNaive in generation 10 of 10 Model Number: 445 with model AverageValueNaive in generation 10 of 10 Model Number: 446 with model SeasonalNaive in generation 10 of 10 Model Number: 447 with model SeasonalNaive in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 447: SeasonalNaive Model Number: 448 with model SeasonalNaive in generation 10 of 10 Model Number: 449 with model SeasonalNaive in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 449: SeasonalNaive Model Number: 450 with model GLM in generation 10 of 10 Model Number: 451 with model GLM in generation 10 of 10 Model Number: 452 with model GLM in generation 10 of 10 Model Number: 453 with model GLS in generation 10 of 10 Model Number: 454 with model GLS in generation 10 of 10 Model Number: 455 with model GLS in generation 10 of 10 Model Number: 456 with model ETS in generation 10 of 10 Model Number: 457 with model ETS in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 457: ETS Model Number: 458 with model ETS in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 458: ETS Model Number: 459 with model ETS in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 459: ETS Model Number: 460 with model LastValueNaive in generation 10 of 10 Model Number: 461 with model LastValueNaive in generation 10 of 10 Model Number: 462 with model LastValueNaive in generation 10 of 10 Template Eval Error: ValueError('Unable to coerce to Series, length must be 1: given 120') in model 462: LastValueNaive Model Number: 463 with model Ensemble in generation 0 of 0 Model Number: 464 with model Ensemble in generation 0 of 0 Model Number: 465 with model Ensemble in generation 0 of 0 Model Number: 466 with model Ensemble in generation 0 of 0 Model Number: 467 with model Ensemble in generation 0 of 0 Model Number: 468 
with model Ensemble in generation 0 of 0 Validation Round: 1 Model Number: 1 of 69 with model Ensemble for Validation 1 Model Number: 2 of 69 with model Ensemble for Validation 1 Model Number: 3 of 69 with model Ensemble for Validation 1 Model Number: 4 of 69 with model WindowRegression for Validation 1 Model Number: 5 of 69 with model Ensemble for Validation 1 Model Number: 6 of 69 with model Ensemble for Validation 1 Model Number: 7 of 69 with model WindowRegression for Validation 1 Model Number: 8 of 69 with model Ensemble for Validation 1 Model Number: 9 of 69 with model WindowRegression for Validation 1 Model Number: 10 of 69 with model WindowRegression for Validation 1 Model Number: 11 of 69 with model WindowRegression for Validation 1 Model Number: 12 of 69 with model WindowRegression for Validation 1 Model Number: 13 of 69 with model WindowRegression for Validation 1 Model Number: 14 of 69 with model ComponentAnalysis for Validation 1 Model Number: 15 of 69 with model GLS for Validation 1 Model Number: 16 of 69 with model GLS for Validation 1 Model Number: 17 of 69 with model ComponentAnalysis for Validation 1 Model Number: 18 of 69 with model AverageValueNaive for Validation 1 Model Number: 19 of 69 with model AverageValueNaive for Validation 1 Model Number: 20 of 69 with model AverageValueNaive for Validation 1 Model Number: 21 of 69 with model AverageValueNaive for Validation 1 Model Number: 22 of 69 with model AverageValueNaive for Validation 1 Model Number: 23 of 69 with model AverageValueNaive for Validation 1 Model Number: 24 of 69 with model SeasonalNaive for Validation 1 Model Number: 25 of 69 with model SeasonalNaive for Validation 1 Model Number: 26 of 69 with model SeasonalNaive for Validation 1 Model Number: 27 of 69 with model SeasonalNaive for Validation 1 Model Number: 28 of 69 with model SeasonalNaive for Validation 1 Model Number: 29 of 69 with model SeasonalNaive for Validation 1 Model Number: 30 of 69 with model SeasonalNaive for Validation 1 Model Number: 31 of 69 with model ComponentAnalysis for Validation 1 Model Number: 32 of 69 with model ComponentAnalysis for Validation 1 Model Number: 33 of 69 with model ComponentAnalysis for Validation 1 Model Number: 34 of 69 with model GLM for Validation 1 Model Number: 35 of 69 with model GLM for Validation 1 Model Number: 36 of 69 with model GLM for Validation 1 Model Number: 37 of 69 with model GLM for Validation 1 Model Number: 38 of 69 with model GLM for Validation 1 Model Number: 39 of 69 with model GLM for Validation 1 Model Number: 40 of 69 with model GLM for Validation 1 Model Number: 41 of 69 with model GLS for Validation 1 Model Number: 42 of 69 with model GLS for Validation 1 Model Number: 43 of 69 with model GLS for Validation 1 Model Number: 44 of 69 with model GLS for Validation 1 Model Number: 45 of 69 with model AverageValueNaive for Validation 1 Model Number: 46 of 69 with model ComponentAnalysis for Validation 1 Model Number: 47 of 69 with model GLS for Validation 1 Model Number: 48 of 69 with model ComponentAnalysis for Validation 1 Model Number: 49 of 69 with model ETS for Validation 1 Model Number: 50 of 69 with model ETS for Validation 1 Model Number: 51 of 69 with model LastValueNaive for Validation 1 Model Number: 52 of 69 with model ETS for Validation 1 Model Number: 53 of 69 with model ETS for Validation 1 Model Number: 54 of 69 with model ETS for Validation 1 Model Number: 55 of 69 with model ZeroesNaive for Validation 1 Model Number: 56 of 69 with model ZeroesNaive for Validation 1 Model 
Number: 57 of 69 with model ZeroesNaive for Validation 1 Model Number: 58 of 69 with model ZeroesNaive for Validation 1 Model Number: 59 of 69 with model ZeroesNaive for Validation 1 Model Number: 60 of 69 with model ZeroesNaive for Validation 1 Model Number: 61 of 69 with model ZeroesNaive for Validation 1 Model Number: 62 of 69 with model ETS for Validation 1 Model Number: 63 of 69 with model LastValueNaive for Validation 1 Model Number: 64 of 69 with model LastValueNaive for Validation 1 Model Number: 65 of 69 with model ETS for Validation 1 Model Number: 66 of 69 with model LastValueNaive for Validation 1 Model Number: 67 of 69 with model LastValueNaive for Validation 1 Model Number: 68 of 69 with model LastValueNaive for Validation 1 Model Number: 69 of 69 with model LastValueNaive for Validation 1 Validation Round: 2 Model Number: 1 of 69 with model Ensemble for Validation 2 Model Number: 2 of 69 with model Ensemble for Validation 2 Model Number: 3 of 69 with model Ensemble for Validation 2 Model Number: 4 of 69 with model WindowRegression for Validation 2 Model Number: 5 of 69 with model Ensemble for Validation 2 Model Number: 6 of 69 with model Ensemble for Validation 2 Model Number: 7 of 69 with model WindowRegression for Validation 2 Model Number: 8 of 69 with model Ensemble for Validation 2 Model Number: 9 of 69 with model WindowRegression for Validation 2 Model Number: 10 of 69 with model WindowRegression for Validation 2 Model Number: 11 of 69 with model WindowRegression for Validation 2 Model Number: 12 of 69 with model WindowRegression for Validation 2 Model Number: 13 of 69 with model WindowRegression for Validation 2 Model Number: 14 of 69 with model ComponentAnalysis for Validation 2 Model Number: 15 of 69 with model GLS for Validation 2 Model Number: 16 of 69 with model GLS for Validation 2 Model Number: 17 of 69 with model ComponentAnalysis for Validation 2 Model Number: 18 of 69 with model AverageValueNaive for Validation 2 Model Number: 19 of 69 with model AverageValueNaive for Validation 2 Model Number: 20 of 69 with model AverageValueNaive for Validation 2 Model Number: 21 of 69 with model AverageValueNaive for Validation 2 Model Number: 22 of 69 with model AverageValueNaive for Validation 2 Model Number: 23 of 69 with model AverageValueNaive for Validation 2 Model Number: 24 of 69 with model SeasonalNaive for Validation 2 Model Number: 25 of 69 with model SeasonalNaive for Validation 2 Model Number: 26 of 69 with model SeasonalNaive for Validation 2 Model Number: 27 of 69 with model SeasonalNaive for Validation 2 Model Number: 28 of 69 with model SeasonalNaive for Validation 2 Model Number: 29 of 69 with model SeasonalNaive for Validation 2 Model Number: 30 of 69 with model SeasonalNaive for Validation 2 Model Number: 31 of 69 with model ComponentAnalysis for Validation 2 Model Number: 32 of 69 with model ComponentAnalysis for Validation 2 Model Number: 33 of 69 with model ComponentAnalysis for Validation 2 Model Number: 34 of 69 with model GLM for Validation 2 Model Number: 35 of 69 with model GLM for Validation 2 Model Number: 36 of 69 with model GLM for Validation 2 Model Number: 37 of 69 with model GLM for Validation 2 Model Number: 38 of 69 with model GLM for Validation 2 Model Number: 39 of 69 with model GLM for Validation 2 Model Number: 40 of 69 with model GLM for Validation 2 Model Number: 41 of 69 with model GLS for Validation 2 Model Number: 42 of 69 with model GLS for Validation 2 Model Number: 43 of 69 with model GLS for Validation 2 Model Number: 44 
of 69 with model GLS for Validation 2 Model Number: 45 of 69 with model AverageValueNaive for Validation 2 Model Number: 46 of 69 with model ComponentAnalysis for Validation 2 Model Number: 47 of 69 with model GLS for Validation 2 Model Number: 48 of 69 with model ComponentAnalysis for Validation 2 Model Number: 49 of 69 with model ETS for Validation 2 Model Number: 50 of 69 with model ETS for Validation 2 Model Number: 51 of 69 with model LastValueNaive for Validation 2 Model Number: 52 of 69 with model ETS for Validation 2 Model Number: 53 of 69 with model ETS for Validation 2 Model Number: 54 of 69 with model ETS for Validation 2 Model Number: 55 of 69 with model ZeroesNaive for Validation 2 Model Number: 56 of 69 with model ZeroesNaive for Validation 2 Model Number: 57 of 69 with model ZeroesNaive for Validation 2 Model Number: 58 of 69 with model ZeroesNaive for Validation 2 Model Number: 59 of 69 with model ZeroesNaive for Validation 2 Model Number: 60 of 69 with model ZeroesNaive for Validation 2 Model Number: 61 of 69 with model ZeroesNaive for Validation 2 Model Number: 62 of 69 with model ETS for Validation 2 Model Number: 63 of 69 with model LastValueNaive for Validation 2 Model Number: 64 of 69 with model LastValueNaive for Validation 2 Model Number: 65 of 69 with model ETS for Validation 2 Model Number: 66 of 69 with model LastValueNaive for Validation 2 Model Number: 67 of 69 with model LastValueNaive for Validation 2 Model Number: 68 of 69 with model LastValueNaive for Validation 2 Model Number: 69 of 69 with model LastValueNaive for Validation 2 Validation Round: 3 Model Number: 1 of 69 with model Ensemble for Validation 3 Model Number: 2 of 69 with model Ensemble for Validation 3 Model Number: 3 of 69 with model Ensemble for Validation 3 Model Number: 4 of 69 with model WindowRegression for Validation 3 Model Number: 5 of 69 with model Ensemble for Validation 3 Model Number: 6 of 69 with model Ensemble for Validation 3 Model Number: 7 of 69 with model WindowRegression for Validation 3 Model Number: 8 of 69 with model Ensemble for Validation 3 Model Number: 9 of 69 with model WindowRegression for Validation 3 Model Number: 10 of 69 with model WindowRegression for Validation 3 Model Number: 11 of 69 with model WindowRegression for Validation 3 Model Number: 12 of 69 with model WindowRegression for Validation 3 Model Number: 13 of 69 with model WindowRegression for Validation 3 Model Number: 14 of 69 with model ComponentAnalysis for Validation 3 Model Number: 15 of 69 with model GLS for Validation 3 Model Number: 16 of 69 with model GLS for Validation 3 Model Number: 17 of 69 with model ComponentAnalysis for Validation 3 Model Number: 18 of 69 with model AverageValueNaive for Validation 3 Model Number: 19 of 69 with model AverageValueNaive for Validation 3 Model Number: 20 of 69 with model AverageValueNaive for Validation 3 Model Number: 21 of 69 with model AverageValueNaive for Validation 3 Model Number: 22 of 69 with model AverageValueNaive for Validation 3 Model Number: 23 of 69 with model AverageValueNaive for Validation 3 Model Number: 24 of 69 with model SeasonalNaive for Validation 3 Model Number: 25 of 69 with model SeasonalNaive for Validation 3 Model Number: 26 of 69 with model SeasonalNaive for Validation 3 Model Number: 27 of 69 with model SeasonalNaive for Validation 3 Model Number: 28 of 69 with model SeasonalNaive for Validation 3 Model Number: 29 of 69 with model SeasonalNaive for Validation 3 Model Number: 30 of 69 with model SeasonalNaive for Validation 3 
Model Number: 31 of 69 with model ComponentAnalysis for Validation 3 Model Number: 32 of 69 with model ComponentAnalysis for Validation 3 Model Number: 33 of 69 with model ComponentAnalysis for Validation 3 Model Number: 34 of 69 with model GLM for Validation 3 Model Number: 35 of 69 with model GLM for Validation 3 Model Number: 36 of 69 with model GLM for Validation 3 Model Number: 37 of 69 with model GLM for Validation 3 Model Number: 38 of 69 with model GLM for Validation 3 Model Number: 39 of 69 with model GLM for Validation 3 Model Number: 40 of 69 with model GLM for Validation 3 Model Number: 41 of 69 with model GLS for Validation 3 Model Number: 42 of 69 with model GLS for Validation 3 Model Number: 43 of 69 with model GLS for Validation 3 Model Number: 44 of 69 with model GLS for Validation 3 Model Number: 45 of 69 with model AverageValueNaive for Validation 3 Model Number: 46 of 69 with model ComponentAnalysis for Validation 3 Model Number: 47 of 69 with model GLS for Validation 3 Model Number: 48 of 69 with model ComponentAnalysis for Validation 3 Model Number: 49 of 69 with model ETS for Validation 3 Model Number: 50 of 69 with model ETS for Validation 3 Model Number: 51 of 69 with model LastValueNaive for Validation 3 Model Number: 52 of 69 with model ETS for Validation 3 Model Number: 53 of 69 with model ETS for Validation 3 Model Number: 54 of 69 with model ETS for Validation 3 Model Number: 55 of 69 with model ZeroesNaive for Validation 3 Model Number: 56 of 69 with model ZeroesNaive for Validation 3 Model Number: 57 of 69 with model ZeroesNaive for Validation 3 Model Number: 58 of 69 with model ZeroesNaive for Validation 3 Model Number: 59 of 69 with model ZeroesNaive for Validation 3 Model Number: 60 of 69 with model ZeroesNaive for Validation 3 Model Number: 61 of 69 with model ZeroesNaive for Validation 3 Model Number: 62 of 69 with model ETS for Validation 3 Model Number: 63 of 69 with model LastValueNaive for Validation 3 Model Number: 64 of 69 with model LastValueNaive for Validation 3 Model Number: 65 of 69 with model ETS for Validation 3 Model Number: 66 of 69 with model LastValueNaive for Validation 3 Model Number: 67 of 69 with model LastValueNaive for Validation 3 Model Number: 68 of 69 with model LastValueNaive for Validation 3 Model Number: 69 of 69 with model LastValueNaive for Validation 3 Model Number: 1 with model Ensemble in generation 0 of 0 Model Number: 2 with model Ensemble in generation 0 of 0
print(uni_model)
Initiated AutoTS object with best model: Ensemble {} {"model_name": "Horizontal", "model_count": 1, "model_metric": "MAE", "models": {"d5a91290d0706ea8b10f4ec956a92f9f": {"Model": "WindowRegression", "ModelParameters": "{\"window_size\": 20, \"regression_model\": {\"model\": \"Adaboost\", \"model_params\": {\"n_estimators\": 50, \"loss\": \"linear\", \"base_estimator\": \"DecisionTree\", \"learning_rate\": 1.0}}, \"input_dim\": \"multivariate\", \"output_dim\": \"1step\", \"normalize_window\": false, \"shuffle\": false, \"max_windows\": 5000}", "TransformationParameters": "{\"fillna\": \"ffill\", \"transformations\": {\"0\": \"RobustScaler\", \"1\": \"Detrend\"}, \"transformation_params\": {\"0\": {}, \"1\": {\"model\": \"Linear\"}}}"}}, "series": {"grp1": "d5a91290d0706ea8b10f4ec956a92f9f"}}
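The fitted object can now produce the 12-step forecast, and the winning template can be saved for reuse. A minimal sketch, assuming the 0.3.x API in which predict() returns a prediction object exposing forecast, lower_forecast and upper_forecast DataFrames (columns keyed by series ID, here 'grp1') and export_template() writes the chosen templates to CSV:
# Forecast the next forecast_length periods with the best model found above
prediction = uni_model.predict()
# Point forecast and 90% prediction interval, indexed by date,
# with one column per series (the single series 'grp1' in this case)
forecast_df = prediction.forecast
lower_df = prediction.lower_forecast
upper_df = prediction.upper_forecast
print(forecast_df.head())
# Keep the winning template so a later search can start from it
uni_model.export_template('uni_best_model.csv', models='best', n=5)
The exported CSV can then be loaded back with import_template() on a fresh AutoTS object to warm-start a subsequent run instead of searching from scratch.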
Models supported are : VAR, VECM, VARMAX, DynamicFactor, RollingRegression, WindowRegression, ComponentAnalysis and GluonTS (the multivariate-capable models that appear in the run log below).
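If in doubt, the models behind the 'multivariate' shorthand can be listed programmatically. A small sketch, assuming this version's model_lists dictionary carries a 'multivariate' entry:
# Inspect which models the 'multivariate' shorthand expands to
from autots.models.model_list import model_lists
print(sorted(model_lists['multivariate']))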
# Split training and testing data
forecast_length = 12
multi_train_df = multi_data_df.iloc[:-forecast_length,:]
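As noted in the fit cell further down, the multivariate path expects a wide frame: a DatetimeIndex, one numeric column per series, and no series_id column. A quick sanity check on the training slice (a minimal sketch) can catch layout problems before the long search starts:
# Sanity-check the wide layout the multivariate fit expects:
# DatetimeIndex plus one numeric column per series (v1, v2), no id column
assert isinstance(multi_train_df.index, pd.DatetimeIndex)
print(multi_train_df.shape)   # (rows, 2)
print(multi_train_df.dtypes)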
# Setup automated model testing
multi_model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
max_generations=10, # number of genetic algorithm generations to run
ensemble='all',
model_list='multivariate',
transformer_list='fast',
num_validations=3,
validation_method="backwards",
random_seed=6,
n_jobs='auto'
)
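With validation_method="backwards" and num_validations=3, the surviving templates are scored on the most recent forecast_length rows and then on three windows stepped progressively further back in time. The loop below is only an illustration of that idea (my own sketch, not AutoTS internals), showing roughly which rows each round would hold out:
# Illustration only: approximate hold-out windows implied by
# forecast_length=12 plus three "backwards" validation rounds
n_rows = len(multi_train_df)
for i in range(1 + 3):  # initial evaluation + 3 validation rounds
    eval_end = n_rows - i * forecast_length
    eval_start = eval_end - forecast_length
    print(f"round {i}: train on rows [0, {eval_start}), evaluate on rows [{eval_start}, {eval_end})")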
# Run models evaluation
# NOTE - for multivariate models, use a datetime index, drop the series_id column and keep the remaining variables as columns.
multi_model = multi_model.fit(
multi_train_df
)
Inferred frequency is: D Model Number: 1 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 1: GluonTS Model Number: 2 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 2: GluonTS Model Number: 3 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 3: GluonTS Model Number: 4 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 4: GluonTS Model Number: 5 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 5: GluonTS Model Number: 6 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 6: GluonTS Model Number: 7 with model VAR in generation 0 of 10 Model Number: 8 with model VAR in generation 0 of 10 Model Number: 9 with model VAR in generation 0 of 10 Model Number: 10 with model VECM in generation 0 of 10 Model Number: 11 with model VECM in generation 0 of 10 Model Number: 12 with model VECM in generation 0 of 10 Model Number: 13 with model VECM in generation 0 of 10 Model Number: 14 with model WindowRegression in generation 0 of 10 Model Number: 15 with model VARMAX in generation 0 of 10 Model Number: 16 with model ComponentAnalysis in generation 0 of 10 Template Eval Error: ValueError('NaN, inf or invalid value detected in weights, estimation infeasible.') in model 16: ComponentAnalysis Model Number: 17 with model RollingRegression in generation 0 of 10 Model Number: 18 with model DynamicFactor in generation 0 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 18: DynamicFactor Model Number: 19 with model VARMAX in generation 0 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 20 with model VARMAX in generation 0 of 10 Model Number: 21 with model WindowRegression in generation 0 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 21: WindowRegression Model Number: 22 with model WindowRegression in generation 0 of 10 Model Number: 23 with model WindowRegression in generation 0 of 10 Model Number: 24 with model ComponentAnalysis in generation 0 of 10 Model Number: 25 with model VAR in generation 0 of 10 Model Number: 26 with model ComponentAnalysis in generation 0 of 10 Model Number: 27 with model VECM in generation 0 of 10 Model Number: 28 with model VECM in generation 0 of 10 Model Number: 29 with model RollingRegression in generation 0 of 10 Template Eval Error: LightGBMError('Check failed: config->bagging_freq > 0 && config->bagging_fraction < 1.0f && config->bagging_fraction > 0.0f at /__w/1/s/python-package/compile/src/boosting/rf.hpp, line 35 .\n') in model 29: RollingRegression Model Number: 30 with model ComponentAnalysis in generation 0 of 10 Model Number: 31 with model ComponentAnalysis in generation 0 of 10 Model Number: 32 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 32: GluonTS Model Number: 33 with model VAR in generation 0 of 10 Model Number: 34 with model VECM in generation 0 of 10 Model Number: 35 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 35: GluonTS Model Number: 36 with model VAR in generation 0 of 10 Model Number: 37 with model RollingRegression in generation 0 of 10 Template Eval Error: ImportError('Tensorflow not available, install with pip install tensorflow.') in model 37: RollingRegression Model Number: 38 with model VECM in generation 0 of 10 Model Number: 39 with model VARMAX in generation 0 of 10 Model Number: 40 with model VARMAX in generation 0 of 10 Template Eval Error: LinAlgError('Matrix is not positive definite') in model 40: VARMAX Model Number: 41 with model WindowRegression in generation 0 of 10 Model Number: 42 with model DynamicFactor in generation 0 of 10 Model Number: 43 with model DynamicFactor in generation 0 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 43: DynamicFactor Model Number: 44 with model DynamicFactor in generation 0 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 44: DynamicFactor Model Number: 45 with model VAR in generation 0 of 10 Model Number: 46 with model VARMAX in generation 0 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 47 with model VECM in generation 0 of 10 Model Number: 48 with model RollingRegression in generation 0 of 10 Model Number: 49 with model VECM in generation 0 of 10 Model Number: 50 with model WindowRegression in generation 0 of 10 Model Number: 51 with model DynamicFactor in generation 0 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 51: DynamicFactor Model Number: 52 with model VECM in generation 0 of 10 Model Number: 53 with model WindowRegression in generation 0 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float32').") in model 53: WindowRegression Model Number: 54 with model GluonTS in generation 0 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 54: GluonTS New Generation: 1 of 10 Model Number: 55 with model VECM in generation 1 of 10 Model Number: 56 with model VECM in generation 1 of 10 Model Number: 57 with model VECM in generation 1 of 10 Model Number: 58 with model VECM in generation 1 of 10 Model Number: 59 with model WindowRegression in generation 1 of 10
[LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000075 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 59: WindowRegression Model Number: 60 with model WindowRegression in generation 1 of 10 Model Number: 61 with model WindowRegression in generation 1 of 10 Model Number: 62 with model WindowRegression in generation 1 of 10 Model Number: 63 with model VAR in generation 1 of 10 Model Number: 64 with model VAR in generation 1 of 10 Model Number: 65 with model VAR in generation 1 of 10 Model Number: 66 with model VAR in generation 1 of 10 Model Number: 67 with model RollingRegression in generation 1 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 67: RollingRegression Model Number: 68 with model RollingRegression in generation 1 of 10 Model Number: 69 with model RollingRegression in generation 1 of 10 Model Number: 70 with model RollingRegression in generation 1 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 70: RollingRegression Model Number: 71 with model ComponentAnalysis in generation 1 of 10 Model Number: 72 with model ComponentAnalysis in generation 1 of 10 Template Eval Error: ValueError('Model TensorflowSTS with error: NameError("name \'sts\' is not defined")') in model 72: ComponentAnalysis Model Number: 73 with model ComponentAnalysis in generation 1 of 10 Template Eval Error: ValueError('Model TensorflowSTS with error: NameError("name \'sts\' is not defined")') in model 73: ComponentAnalysis Model Number: 74 with model VARMAX in generation 1 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 75 with model VARMAX in generation 1 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 76 with model VARMAX in generation 1 of 10 Model Number: 77 with model VARMAX in generation 1 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 78 with model DynamicFactor in generation 1 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 78: DynamicFactor Model Number: 79 with model DynamicFactor in generation 1 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 79: DynamicFactor Model Number: 80 with model DynamicFactor in generation 1 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 80: DynamicFactor Model Number: 81 with model DynamicFactor in generation 1 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 81: DynamicFactor Model Number: 82 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 82: GluonTS Model Number: 83 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 83: GluonTS Model Number: 84 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 84: GluonTS Model Number: 85 with model GluonTS in generation 1 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 85: GluonTS New Generation: 2 of 10 Model Number: 86 with model VECM in generation 2 of 10 Model Number: 87 with model VECM in generation 2 of 10 Model Number: 88 with model VECM in generation 2 of 10 Model Number: 89 with model VECM in generation 2 of 10 Model Number: 90 with model WindowRegression in generation 2 of 10 Template Eval Error: ModuleNotFoundError("No module named 'xgboost'") in model 90: WindowRegression Model Number: 91 with model WindowRegression in generation 2 of 10 Model Number: 92 with model WindowRegression in generation 2 of 10 Model Number: 93 with model WindowRegression in generation 2 of 10 Template Eval Error: ModuleNotFoundError("No module named 'xgboost'") in model 93: WindowRegression Model Number: 94 with model VAR in generation 2 of 10 Model Number: 95 with model VAR in generation 2 of 10 Model Number: 96 with model VAR in generation 2 of 10 Model Number: 97 with model VAR in generation 2 of 10 Model Number: 98 with model RollingRegression in generation 2 of 10 Model Number: 99 with model RollingRegression in generation 2 of 10 Model Number: 100 with model RollingRegression in generation 2 of 10 Model Number: 101 with model RollingRegression in generation 2 of 10 Model Number: 102 with model ComponentAnalysis in generation 2 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 102: ComponentAnalysis Model Number: 103 with model ComponentAnalysis in generation 2 of 10 Template Eval Error: ValueError('Model TensorflowSTS with error: NameError("name \'sts\' is not defined")') in model 103: ComponentAnalysis Model Number: 104 with model ComponentAnalysis in generation 2 of 10 Template Eval Error: ValueError("Model WindowRegression with error: LightGBMError('[gamma]: at least one target label is negative')") in model 104: ComponentAnalysis Model Number: 105 with model VARMAX in generation 2 of 10
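The repeated ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') and ModuleNotFoundError("No module named 'xgboost'") entries are not fatal: AutoTS simply records a Template Eval Error for that candidate and moves on to the next one. If you would rather not search those models at all, model_list also accepts an explicit list of model names instead of a preset such as 'fast'. A minimal sketch, not part of the original run; the list below is an illustrative subset taken from model names that ran successfully in the log above.
# Hypothetical example: only search models whose dependencies are available
from autots import AutoTS

installed_only = [
    'AverageValueNaive', 'LastValueNaive', 'SeasonalNaive',
    'ETS', 'GLM', 'GLS', 'VAR', 'VECM', 'WindowRegression',
]

lean_model = AutoTS(
    forecast_length=12,
    frequency='infer',
    max_generations=10,
    model_list=installed_only,  # explicit list instead of a preset name
)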
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 106 with model VARMAX in generation 2 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 107 with model VARMAX in generation 2 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 108 with model VARMAX in generation 2 of 10 Model Number: 109 with model DynamicFactor in generation 2 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 109: DynamicFactor Model Number: 110 with model DynamicFactor in generation 2 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 110: DynamicFactor Model Number: 111 with model DynamicFactor in generation 2 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 111: DynamicFactor Model Number: 112 with model DynamicFactor in generation 2 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 112: DynamicFactor Model Number: 113 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 113: GluonTS Model Number: 114 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 114: GluonTS Model Number: 115 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 115: GluonTS Model Number: 116 with model GluonTS in generation 2 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 116: GluonTS New Generation: 3 of 10 Model Number: 117 with model VECM in generation 3 of 10 Model Number: 118 with model VECM in generation 3 of 10 Model Number: 119 with model VECM in generation 3 of 10 Model Number: 120 with model VECM in generation 3 of 10 Model Number: 121 with model WindowRegression in generation 3 of 10 Model Number: 122 with model WindowRegression in generation 3 of 10 Model Number: 123 with model WindowRegression in generation 3 of 10 Model Number: 124 with model WindowRegression in generation 3 of 10 Model Number: 125 with model VAR in generation 3 of 10 Model Number: 126 with model VAR in generation 3 of 10 Model Number: 127 with model VAR in generation 3 of 10 Model Number: 128 with model VAR in generation 3 of 10 Model Number: 129 with model RollingRegression in generation 3 of 10 Model Number: 130 with model RollingRegression in generation 3 of 10 Model Number: 131 with model RollingRegression in generation 3 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 131: RollingRegression Model Number: 132 with model RollingRegression in generation 3 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 132: RollingRegression Model Number: 133 with model ComponentAnalysis in generation 3 of 10 Model Number: 134 with model ComponentAnalysis in generation 3 of 10
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this. INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 135 with model ComponentAnalysis in generation 3 of 10 Model Number: 136 with model VARMAX in generation 3 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 137 with model VARMAX in generation 3 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 138 with model VARMAX in generation 3 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 139 with model DynamicFactor in generation 3 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 139: DynamicFactor Model Number: 140 with model DynamicFactor in generation 3 of 10 Model Number: 141 with model DynamicFactor in generation 3 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 141: DynamicFactor Model Number: 142 with model DynamicFactor in generation 3 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 142: DynamicFactor Model Number: 143 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 143: GluonTS Model Number: 144 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 144: GluonTS Model Number: 145 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 145: GluonTS Model Number: 146 with model GluonTS in generation 3 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 146: GluonTS New Generation: 4 of 10 Model Number: 147 with model VECM in generation 4 of 10 Model Number: 148 with model VECM in generation 4 of 10 Model Number: 149 with model VECM in generation 4 of 10 Model Number: 150 with model WindowRegression in generation 4 of 10 Model Number: 151 with model WindowRegression in generation 4 of 10 Model Number: 152 with model WindowRegression in generation 4 of 10 Model Number: 153 with model WindowRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 153: WindowRegression Model Number: 154 with model VAR in generation 4 of 10 Model Number: 155 with model VAR in generation 4 of 10 Model Number: 156 with model VAR in generation 4 of 10 Model Number: 157 with model VAR in generation 4 of 10 Model Number: 158 with model RollingRegression in generation 4 of 10 Model Number: 159 with model RollingRegression in generation 4 of 10 Model Number: 160 with model RollingRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 160: RollingRegression Model Number: 161 with model RollingRegression in generation 4 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 161: RollingRegression Model Number: 162 with model ComponentAnalysis in generation 4 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 162: ComponentAnalysis Model Number: 163 with model ComponentAnalysis in generation 4 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 163: ComponentAnalysis Model Number: 164 with model ComponentAnalysis in generation 4 of 10 Model Number: 165 with model VARMAX in generation 4 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 166 with model VARMAX in generation 4 of 10 Model Number: 167 with model VARMAX in generation 4 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 168 with model VARMAX in generation 4 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 169 with model DynamicFactor in generation 4 of 10 Model Number: 170 with model DynamicFactor in generation 4 of 10 Model Number: 171 with model DynamicFactor in generation 4 of 10 Model Number: 172 with model DynamicFactor in generation 4 of 10 Model Number: 173 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 173: GluonTS Model Number: 174 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 174: GluonTS Model Number: 175 with model GluonTS in generation 4 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 175: GluonTS New Generation: 5 of 10 Model Number: 176 with model VECM in generation 5 of 10 Model Number: 177 with model VECM in generation 5 of 10 Model Number: 178 with model VECM in generation 5 of 10 Model Number: 179 with model VECM in generation 5 of 10 Model Number: 180 with model WindowRegression in generation 5 of 10 Model Number: 181 with model WindowRegression in generation 5 of 10
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.6s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 2.0s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
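The [Parallel(n_jobs=6)] lines come from joblib inside the sklearn-based regression models and show that n_jobs='auto' resolved to 6 workers on this machine. The worker count can be pinned explicitly, and AutoTS's own per-model printing can be reduced via its verbose argument (the LightGBM and joblib messages themselves are emitted by the underlying estimators). A minimal sketch, not part of the original run:
# Hypothetical example: fixed worker count and quieter AutoTS logging
from autots import AutoTS

quieter_model = AutoTS(
    forecast_length=12,
    frequency='infer',
    max_generations=10,
    model_list='fast',
    n_jobs=2,    # fixed worker count instead of 'auto'
    verbose=0,   # fewer per-model progress prints from AutoTS itself
)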
Model Number: 182 with model WindowRegression in generation 5 of 10 Model Number: 183 with model WindowRegression in generation 5 of 10
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.5s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 2.0s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float32').") in model 183: WindowRegression Model Number: 184 with model VAR in generation 5 of 10 Model Number: 185 with model VAR in generation 5 of 10 Model Number: 186 with model VAR in generation 5 of 10 Model Number: 187 with model VAR in generation 5 of 10 Model Number: 188 with model RollingRegression in generation 5 of 10 Model Number: 189 with model RollingRegression in generation 5 of 10 Model Number: 190 with model RollingRegression in generation 5 of 10 Model Number: 191 with model RollingRegression in generation 5 of 10 Model Number: 192 with model ComponentAnalysis in generation 5 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 192: ComponentAnalysis Model Number: 193 with model ComponentAnalysis in generation 5 of 10 Model Number: 194 with model ComponentAnalysis in generation 5 of 10 Model Number: 195 with model DynamicFactor in generation 5 of 10 Template Eval Error: MissingDataError('exog contains inf or nans') in model 195: DynamicFactor Model Number: 196 with model DynamicFactor in generation 5 of 10 Model Number: 197 with model DynamicFactor in generation 5 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 198 with model DynamicFactor in generation 5 of 10 Model Number: 199 with model VARMAX in generation 5 of 10 Model Number: 200 with model VARMAX in generation 5 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 201 with model VARMAX in generation 5 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 202 with model VARMAX in generation 5 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 203 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 203: GluonTS Model Number: 204 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 204: GluonTS Model Number: 205 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 205: GluonTS Model Number: 206 with model GluonTS in generation 5 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 206: GluonTS New Generation: 6 of 10 Model Number: 207 with model WindowRegression in generation 6 of 10 Model Number: 208 with model WindowRegression in generation 6 of 10
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.8s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.5s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 1.9s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 209 with model WindowRegression in generation 6 of 10 Model Number: 210 with model WindowRegression in generation 6 of 10 Model Number: 211 with model VECM in generation 6 of 10 Model Number: 212 with model VECM in generation 6 of 10 Model Number: 213 with model VECM in generation 6 of 10 Model Number: 214 with model VECM in generation 6 of 10 Model Number: 215 with model DynamicFactor in generation 6 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 216 with model DynamicFactor in generation 6 of 10 Model Number: 217 with model DynamicFactor in generation 6 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 217: DynamicFactor Model Number: 218 with model DynamicFactor in generation 6 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 218: DynamicFactor Model Number: 219 with model VAR in generation 6 of 10 Model Number: 220 with model VAR in generation 6 of 10 Model Number: 221 with model VAR in generation 6 of 10 Model Number: 222 with model VAR in generation 6 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 223 with model VARMAX in generation 6 of 10 Model Number: 224 with model VARMAX in generation 6 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 225 with model VARMAX in generation 6 of 10 Model Number: 226 with model VARMAX in generation 6 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 227 with model RollingRegression in generation 6 of 10 Model Number: 228 with model RollingRegression in generation 6 of 10 [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000049 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
Model Number: 229 with model RollingRegression in generation 6 of 10 Model Number: 230 with model RollingRegression in generation 6 of 10 Model Number: 231 with model ComponentAnalysis in generation 6 of 10 Model Number: 232 with model ComponentAnalysis in generation 6 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 232: ComponentAnalysis Model Number: 233 with model ComponentAnalysis in generation 6 of 10 Model Number: 234 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 234: GluonTS Model Number: 235 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 235: GluonTS Model Number: 236 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 236: GluonTS Model Number: 237 with model GluonTS in generation 6 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 237: GluonTS New Generation: 7 of 10 Model Number: 238 with model WindowRegression in generation 7 of 10 Model Number: 239 with model WindowRegression in generation 7 of 10 Model Number: 240 with model WindowRegression in generation 7 of 10 Model Number: 241 with model WindowRegression in generation 7 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 241: WindowRegression Model Number: 242 with model VECM in generation 7 of 10 Model Number: 243 with model VECM in generation 7 of 10 Model Number: 244 with model VECM in generation 7 of 10 Model Number: 245 with model VECM in generation 7 of 10 Model Number: 246 with model DynamicFactor in generation 7 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 247 with model DynamicFactor in generation 7 of 10 Model Number: 248 with model DynamicFactor in generation 7 of 10 Model Number: 249 with model DynamicFactor in generation 7 of 10 Model Number: 250 with model VAR in generation 7 of 10 Template Eval Error: LinAlgError('1-th leading minor of the array is not positive definite') in model 250: VAR Model Number: 251 with model VAR in generation 7 of 10 Model Number: 252 with model VAR in generation 7 of 10 Template Eval Error: LinAlgError('1-th leading minor of the array is not positive definite') in model 252: VAR Model Number: 253 with model VAR in generation 7 of 10 Model Number: 254 with model VARMAX in generation 7 of 10 Model Number: 255 with model VARMAX in generation 7 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 256 with model VARMAX in generation 7 of 10 Model Number: 257 with model RollingRegression in generation 7 of 10 Model Number: 258 with model RollingRegression in generation 7 of 10
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.6s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 2.0s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 259 with model RollingRegression in generation 7 of 10 Model Number: 260 with model RollingRegression in generation 7 of 10 Model Number: 261 with model ComponentAnalysis in generation 7 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 261: ComponentAnalysis Model Number: 262 with model ComponentAnalysis in generation 7 of 10
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 263 with model ComponentAnalysis in generation 7 of 10
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 264 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 264: GluonTS Model Number: 265 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 265: GluonTS Model Number: 266 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 266: GluonTS Model Number: 267 with model GluonTS in generation 7 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 267: GluonTS New Generation: 8 of 10 Model Number: 268 with model WindowRegression in generation 8 of 10 [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000342 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 268: WindowRegression Model Number: 269 with model WindowRegression in generation 8 of 10 Model Number: 270 with model WindowRegression in generation 8 of 10 [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000400 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf Model Number: 271 with model WindowRegression in generation 8 of 10 [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.013718 seconds. You can set `force_col_wise=true` to remove the overhead. [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, 
best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves. (num_leaves=31). [LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000090 seconds. You can set `force_col_wise=true` to remove the overhead. 
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No 
further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf [LightGBM] [Warning] No further splits with positive gain, best gain: -inf Model Number: 272 with model VECM in generation 8 of 10 Model Number: 273 with model VECM in generation 8 of 10 Model Number: 274 with model VECM in generation 8 of 10 Model Number: 275 with model VECM in generation 8 of 10 Model Number: 276 with model DynamicFactor in generation 8 of 10 Model Number: 277 with model DynamicFactor in generation 8 of 10 Model Number: 278 with model DynamicFactor in generation 8 of 10 Model Number: 279 with model DynamicFactor in generation 8 of 10 Model Number: 280 with model VAR in generation 8 of 10 Model Number: 281 with model VAR in generation 8 of 10 Model Number: 282 with model VAR in generation 8 of 10 Model Number: 283 with model VAR in generation 8 of 10 Model Number: 284 with model VARMAX in generation 8 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 285 with model VARMAX in generation 8 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 286 with model VARMAX in generation 8 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 287 with model RollingRegression in generation 8 of 10 Model Number: 288 with model RollingRegression in generation 8 of 10 Template Eval Error: ImportError('Tensorflow not available, install with pip install tensorflow.') in model 288: RollingRegression Model Number: 289 with model RollingRegression in generation 8 of 10 Template Eval Error: ImportError('Tensorflow not available, install with pip install tensorflow.') in model 289: RollingRegression Model Number: 290 with model RollingRegression in generation 8 of 10 Model Number: 291 with model ComponentAnalysis in generation 8 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 291: ComponentAnalysis Model Number: 292 with model ComponentAnalysis in generation 8 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 292: ComponentAnalysis Model Number: 293 with model ComponentAnalysis in generation 8 of 10 Model Number: 294 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 294: GluonTS Model Number: 295 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 295: GluonTS Model Number: 296 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 296: GluonTS Model Number: 297 with model GluonTS in generation 8 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 297: GluonTS New Generation: 9 of 10 Model Number: 298 with model WindowRegression in generation 9 of 10 Model Number: 299 with model WindowRegression in generation 9 of 10 Model Number: 300 with model WindowRegression in generation 9 of 10
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.6s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 2.0s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 301 with model WindowRegression in generation 9 of 10 Model Number: 302 with model VECM in generation 9 of 10 Model Number: 303 with model VECM in generation 9 of 10 Model Number: 304 with model VECM in generation 9 of 10 Model Number: 305 with model VECM in generation 9 of 10 Model Number: 306 with model DynamicFactor in generation 9 of 10 Model Number: 307 with model DynamicFactor in generation 9 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 307: DynamicFactor Model Number: 308 with model VAR in generation 9 of 10 Model Number: 309 with model VAR in generation 9 of 10 Model Number: 310 with model VAR in generation 9 of 10 Model Number: 311 with model VAR in generation 9 of 10 Model Number: 312 with model VARMAX in generation 9 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 313 with model VARMAX in generation 9 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 314 with model VARMAX in generation 9 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 315 with model VARMAX in generation 9 of 10 Model Number: 316 with model RollingRegression in generation 9 of 10 Model Number: 317 with model RollingRegression in generation 9 of 10 Model Number: 318 with model RollingRegression in generation 9 of 10 Model Number: 319 with model RollingRegression in generation 9 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 319: RollingRegression Model Number: 320 with model ComponentAnalysis in generation 9 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 320: ComponentAnalysis Model Number: 321 with model ComponentAnalysis in generation 9 of 10 Model Number: 322 with model ComponentAnalysis in generation 9 of 10 Template Eval Error: ValueError('n_components and decomposition not suitable for this dataset.') in model 322: ComponentAnalysis Model Number: 323 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 323: GluonTS Model Number: 324 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 324: GluonTS Model Number: 325 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 325: GluonTS Model Number: 326 with model GluonTS in generation 9 of 10 Template Eval Error: ImportError('GluonTS installation not found or installed version is incompatible with AutoTS.') in model 326: GluonTS New Generation: 10 of 10 Model Number: 327 with model WindowRegression in generation 10 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float64').") in model 327: WindowRegression Model Number: 328 with model WindowRegression in generation 10 of 10 Model Number: 329 with model WindowRegression in generation 10 of 10 Model Number: 330 with model VECM in generation 10 of 10 Template Eval Error: LinAlgError('Singular matrix') in model 330: VECM Model Number: 331 with model VECM in generation 10 of 10 Model Number: 332 with model VECM in generation 10 of 10 Model Number: 333 with model VECM in generation 10 of 10 Model Number: 334 with model DynamicFactor in generation 10 of 10 Model Number: 335 with model DynamicFactor in generation 10 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 335: DynamicFactor Model Number: 336 with model DynamicFactor in generation 10 of 10 Template Eval Error: ValueError('Number of factors must be less than the number of endogenous variables.') in model 336: DynamicFactor Model Number: 337 with model DynamicFactor in generation 10 of 10 Model Number: 338 with model VAR in generation 10 of 10 Model Number: 339 with model VAR in generation 10 of 10 Model Number: 340 with model VAR in generation 10 of 10 Model Number: 341 with model VAR in generation 10 of 10 Model Number: 342 with model VARMAX in generation 10 of 10 Model Number: 343 with model VARMAX in generation 10 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 344 with model VARMAX in generation 10 of 10 Model Number: 345 with model VARMAX in generation 10 of 10
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 346 with model RollingRegression in generation 10 of 10 Model Number: 347 with model RollingRegression in generation 10 of 10 Model Number: 348 with model RollingRegression in generation 10 of 10 Template Eval Error: ValueError("Input contains NaN, infinity or a value too large for dtype('float32').") in model 348: RollingRegression Model Number: 349 with model RollingRegression in generation 10 of 10 Model Number: 350 with model Ensemble in generation 0 of 0 Model Number: 351 with model Ensemble in generation 0 of 0 Model Number: 352 with model Ensemble in generation 0 of 0 Model Number: 353 with model Ensemble in generation 0 of 0 Model Number: 354 with model Ensemble in generation 0 of 0 Model Number: 355 with model Ensemble in generation 0 of 0 Validation Round: 1 Model Number: 1 of 56 with model Ensemble for Validation 1 Model Number: 2 of 56 with model Ensemble for Validation 1 Model Number: 3 of 56 with model Ensemble for Validation 1 Model Number: 4 of 56 with model Ensemble for Validation 1 Model Number: 5 of 56 with model Ensemble for Validation 1 Model Number: 6 of 56 with model WindowRegression for Validation 1 Model Number: 7 of 56 with model WindowRegression for Validation 1 Model Number: 8 of 56 with model VECM for Validation 1 Model Number: 9 of 56 with model WindowRegression for Validation 1
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.5s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 1.9s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 10 of 56 with model VECM for Validation 1 Model Number: 11 of 56 with model VECM for Validation 1 Model Number: 12 of 56 with model VECM for Validation 1 Model Number: 13 of 56 with model VECM for Validation 1 Model Number: 14 of 56 with model VECM for Validation 1 Model Number: 15 of 56 with model VECM for Validation 1 Model Number: 16 of 56 with model VECM for Validation 1 Model Number: 17 of 56 with model VARMAX for Validation 1 Model Number: 18 of 56 with model DynamicFactor for Validation 1 Model Number: 19 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 20 of 56 with model WindowRegression for Validation 1 Model Number: 21 of 56 with model WindowRegression for Validation 1 Model Number: 22 of 56 with model WindowRegression for Validation 1 Model Number: 23 of 56 with model WindowRegression for Validation 1 Model Number: 24 of 56 with model Ensemble for Validation 1 Model Number: 25 of 56 with model VAR for Validation 1 Model Number: 26 of 56 with model VAR for Validation 1 Model Number: 27 of 56 with model VARMAX for Validation 1 Model Number: 28 of 56 with model WindowRegression for Validation 1 Model Number: 29 of 56 with model VARMAX for Validation 1 Model Number: 30 of 56 with model VAR for Validation 1 Model Number: 31 of 56 with model VAR for Validation 1 Model Number: 32 of 56 with model VAR for Validation 1 Model Number: 33 of 56 with model DynamicFactor for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 34 of 56 with model DynamicFactor for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 35 of 56 with model VAR for Validation 1 Model Number: 36 of 56 with model VAR for Validation 1 Model Number: 37 of 56 with model RollingRegression for Validation 1 Model Number: 38 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 39 of 56 with model VAR for Validation 1 Model Number: 40 of 56 with model ComponentAnalysis for Validation 1 Model Number: 41 of 56 with model ComponentAnalysis for Validation 1 Model Number: 42 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 43 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 44 of 56 with model RollingRegression for Validation 1 Model Number: 45 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 46 of 56 with model DynamicFactor for Validation 1 Model Number: 47 of 56 with model RollingRegression for Validation 1 Model Number: 48 of 56 with model DynamicFactor for Validation 1
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 49 of 56 with model ComponentAnalysis for Validation 1
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 50 of 56 with model DynamicFactor for Validation 1 Model Number: 51 of 56 with model DynamicFactor for Validation 1 Model Number: 52 of 56 with model RollingRegression for Validation 1 Model Number: 53 of 56 with model ComponentAnalysis for Validation 1 Model Number: 54 of 56 with model RollingRegression for Validation 1 Model Number: 55 of 56 with model VARMAX for Validation 1
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 56 of 56 with model VECM for Validation 1 Validation Round: 2 Model Number: 1 of 56 with model Ensemble for Validation 2 Model Number: 2 of 56 with model Ensemble for Validation 2 Model Number: 3 of 56 with model Ensemble for Validation 2 Model Number: 4 of 56 with model Ensemble for Validation 2 Model Number: 5 of 56 with model Ensemble for Validation 2 Model Number: 6 of 56 with model WindowRegression for Validation 2 Model Number: 7 of 56 with model WindowRegression for Validation 2 Model Number: 8 of 56 with model VECM for Validation 2 Model Number: 9 of 56 with model WindowRegression for Validation 2
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.8s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.5s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 1.9s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 10 of 56 with model VECM for Validation 2 Model Number: 11 of 56 with model VECM for Validation 2 Model Number: 12 of 56 with model VECM for Validation 2 Model Number: 13 of 56 with model VECM for Validation 2 Model Number: 14 of 56 with model VECM for Validation 2 Model Number: 15 of 56 with model VECM for Validation 2 Model Number: 16 of 56 with model VECM for Validation 2 Model Number: 17 of 56 with model VARMAX for Validation 2 Model Number: 18 of 56 with model DynamicFactor for Validation 2 Model Number: 19 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 20 of 56 with model WindowRegression for Validation 2 Model Number: 21 of 56 with model WindowRegression for Validation 2 Model Number: 22 of 56 with model WindowRegression for Validation 2 Model Number: 23 of 56 with model WindowRegression for Validation 2 Model Number: 24 of 56 with model Ensemble for Validation 2 Model Number: 25 of 56 with model VAR for Validation 2 Model Number: 26 of 56 with model VAR for Validation 2 Model Number: 27 of 56 with model VARMAX for Validation 2 Model Number: 28 of 56 with model WindowRegression for Validation 2 Model Number: 29 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 30 of 56 with model VAR for Validation 2 Model Number: 31 of 56 with model VAR for Validation 2 Model Number: 32 of 56 with model VAR for Validation 2 Model Number: 33 of 56 with model DynamicFactor for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 34 of 56 with model DynamicFactor for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 35 of 56 with model VAR for Validation 2 Model Number: 36 of 56 with model VAR for Validation 2 Model Number: 37 of 56 with model RollingRegression for Validation 2 Model Number: 38 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 39 of 56 with model VAR for Validation 2 Model Number: 40 of 56 with model ComponentAnalysis for Validation 2 Model Number: 41 of 56 with model ComponentAnalysis for Validation 2 Model Number: 42 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 43 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 44 of 56 with model RollingRegression for Validation 2 Model Number: 45 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 46 of 56 with model DynamicFactor for Validation 2 Model Number: 47 of 56 with model RollingRegression for Validation 2 Model Number: 48 of 56 with model DynamicFactor for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to " INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 49 of 56 with model ComponentAnalysis for Validation 2
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 50 of 56 with model DynamicFactor for Validation 2 Model Number: 51 of 56 with model DynamicFactor for Validation 2 Model Number: 52 of 56 with model RollingRegression for Validation 2 Model Number: 53 of 56 with model ComponentAnalysis for Validation 2 Model Number: 54 of 56 with model RollingRegression for Validation 2 Model Number: 55 of 56 with model VARMAX for Validation 2
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 56 of 56 with model VECM for Validation 2 Validation Round: 3 Model Number: 1 of 56 with model Ensemble for Validation 3 Model Number: 2 of 56 with model Ensemble for Validation 3 Model Number: 3 of 56 with model Ensemble for Validation 3 Model Number: 4 of 56 with model Ensemble for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 5 of 56 with model Ensemble for Validation 3 Model Number: 6 of 56 with model WindowRegression for Validation 3 Model Number: 7 of 56 with model WindowRegression for Validation 3 Model Number: 8 of 56 with model VECM for Validation 3 Model Number: 9 of 56 with model WindowRegression for Validation 3
[Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.4s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.9s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 1.6s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 2.0s finished [Parallel(n_jobs=6)]: Using backend ThreadingBackend with 6 concurrent workers. [Parallel(n_jobs=6)]: Done 38 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 188 tasks | elapsed: 0.0s [Parallel(n_jobs=6)]: Done 438 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 788 tasks | elapsed: 0.1s [Parallel(n_jobs=6)]: Done 1000 out of 1000 | elapsed: 0.2s finished
Model Number: 10 of 56 with model VECM for Validation 3 Model Number: 11 of 56 with model VECM for Validation 3 Model Number: 12 of 56 with model VECM for Validation 3 Model Number: 13 of 56 with model VECM for Validation 3 Model Number: 14 of 56 with model VECM for Validation 3 Model Number: 15 of 56 with model VECM for Validation 3 Model Number: 16 of 56 with model VECM for Validation 3 Model Number: 17 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 18 of 56 with model DynamicFactor for Validation 3 Model Number: 19 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 20 of 56 with model WindowRegression for Validation 3 Model Number: 21 of 56 with model WindowRegression for Validation 3 Model Number: 22 of 56 with model WindowRegression for Validation 3 Model Number: 23 of 56 with model WindowRegression for Validation 3 Model Number: 24 of 56 with model Ensemble for Validation 3 Model Number: 25 of 56 with model VAR for Validation 3 Model Number: 26 of 56 with model VAR for Validation 3 Model Number: 27 of 56 with model VARMAX for Validation 3 Model Number: 28 of 56 with model WindowRegression for Validation 3 Model Number: 29 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 30 of 56 with model VAR for Validation 3 Model Number: 31 of 56 with model VAR for Validation 3 Model Number: 32 of 56 with model VAR for Validation 3 Model Number: 33 of 56 with model DynamicFactor for Validation 3 Model Number: 34 of 56 with model DynamicFactor for Validation 3 Model Number: 35 of 56 with model VAR for Validation 3 Model Number: 36 of 56 with model VAR for Validation 3 Model Number: 37 of 56 with model RollingRegression for Validation 3 Model Number: 38 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 39 of 56 with model VAR for Validation 3 Model Number: 40 of 56 with model ComponentAnalysis for Validation 3 Model Number: 41 of 56 with model ComponentAnalysis for Validation 3 Model Number: 42 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 43 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 44 of 56 with model RollingRegression for Validation 3 Model Number: 45 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 46 of 56 with model DynamicFactor for Validation 3 Model Number: 47 of 56 with model RollingRegression for Validation 3 Model Number: 48 of 56 with model DynamicFactor for Validation 3
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 49 of 56 with model ComponentAnalysis for Validation 3
INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this. INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
Model Number: 50 of 56 with model DynamicFactor for Validation 3 Model Number: 51 of 56 with model DynamicFactor for Validation 3 Model Number: 52 of 56 with model RollingRegression for Validation 3 Model Number: 53 of 56 with model ComponentAnalysis for Validation 3 Model Number: 54 of 56 with model RollingRegression for Validation 3 Model Number: 55 of 56 with model VARMAX for Validation 3
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
Model Number: 56 of 56 with model VECM for Validation 3 Model Number: 1 with model Ensemble in generation 0 of 0 Model Number: 2 with model Ensemble in generation 0 of 0
print(multi_model)
Initiated AutoTS object with best model: Ensemble {} {"model_name": "Horizontal", "model_count": 2, "model_metric": "MAE", "models": {"3b86dff30168d950ba38ca940272e6ec": {"Model": "Ensemble", "ModelParameters": "{\"model_name\": \"BestN\", \"model_count\": 3, \"model_metric\": \"best_score\", \"models\": {\"96182529c2d1755efddd886928d06f8a\": {\"Model\": \"WindowRegression\", \"ModelParameters\": \"{\\\"window_size\\\": 10, \\\"regression_model\\\": {\\\"model\\\": \\\"KNN\\\", \\\"model_params\\\": {\\\"n_neighbors\\\": 3, \\\"weights\\\": \\\"distance\\\"}}, \\\"input_dim\\\": \\\"multivariate\\\", \\\"output_dim\\\": \\\"1step\\\", \\\"normalize_window\\\": false, \\\"shuffle\\\": true, \\\"max_windows\\\": 5000}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"mean\\\", \\\"transformations\\\": {\\\"0\\\": \\\"ClipOutliers\\\", \\\"1\\\": \\\"Discretize\\\", \\\"2\\\": \\\"Detrend\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 2, \\\"fillna\\\": null}, \\\"1\\\": {\\\"discretization\\\": \\\"lower\\\", \\\"n_bins\\\": 10}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}}}\"}, \"d87b0806af03814ad9a5ddda0aea1624\": {\"Model\": \"WindowRegression\", \"ModelParameters\": \"{\\\"window_size\\\": 10, \\\"regression_model\\\": {\\\"model\\\": \\\"KNN\\\", \\\"model_params\\\": {\\\"n_neighbors\\\": 3, \\\"weights\\\": \\\"distance\\\"}}, \\\"input_dim\\\": \\\"multivariate\\\", \\\"output_dim\\\": \\\"1step\\\", \\\"normalize_window\\\": false, \\\"shuffle\\\": false, \\\"max_windows\\\": 5000}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"mean\\\", \\\"transformations\\\": {\\\"0\\\": \\\"ClipOutliers\\\", \\\"1\\\": \\\"Discretize\\\", \\\"2\\\": \\\"Detrend\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 2, \\\"fillna\\\": null}, \\\"1\\\": {\\\"discretization\\\": \\\"lower\\\", \\\"n_bins\\\": 10}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}}}\"}, \"8974d6328ac20141e8472d19745d1209\": {\"Model\": \"VECM\", \"ModelParameters\": \"{\\\"deterministic\\\": \\\"ci\\\", \\\"k_ar_diff\\\": 3, \\\"regression_type\\\": null}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"zero\\\", \\\"transformations\\\": {\\\"0\\\": \\\"ClipOutliers\\\", \\\"1\\\": \\\"Discretize\\\", \\\"2\\\": \\\"Detrend\\\", \\\"3\\\": \\\"ClipOutliers\\\", \\\"4\\\": \\\"Log\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 2, \\\"fillna\\\": null}, \\\"1\\\": {\\\"discretization\\\": \\\"lower\\\", \\\"n_bins\\\": 10}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}, \\\"3\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 3, \\\"fillna\\\": null}, \\\"4\\\": {}}}\"}}}", "TransformationParameters": "{}"}, "0d6bd50098b3cf2ba038b3fd3193e30c": {"Model": "Ensemble", "ModelParameters": "{\"model_name\": \"BestN\", \"model_count\": 3, \"model_metric\": \"best_score_unique\", \"models\": {\"96182529c2d1755efddd886928d06f8a\": {\"Model\": \"WindowRegression\", \"ModelParameters\": \"{\\\"window_size\\\": 10, \\\"regression_model\\\": {\\\"model\\\": \\\"KNN\\\", \\\"model_params\\\": {\\\"n_neighbors\\\": 3, \\\"weights\\\": \\\"distance\\\"}}, \\\"input_dim\\\": \\\"multivariate\\\", \\\"output_dim\\\": \\\"1step\\\", \\\"normalize_window\\\": false, \\\"shuffle\\\": true, \\\"max_windows\\\": 5000}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"mean\\\", \\\"transformations\\\": {\\\"0\\\": \\\"ClipOutliers\\\", \\\"1\\\": \\\"Discretize\\\", 
\\\"2\\\": \\\"Detrend\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 2, \\\"fillna\\\": null}, \\\"1\\\": {\\\"discretization\\\": \\\"lower\\\", \\\"n_bins\\\": 10}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}}}\"}, \"8974d6328ac20141e8472d19745d1209\": {\"Model\": \"VECM\", \"ModelParameters\": \"{\\\"deterministic\\\": \\\"ci\\\", \\\"k_ar_diff\\\": 3, \\\"regression_type\\\": null}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"zero\\\", \\\"transformations\\\": {\\\"0\\\": \\\"ClipOutliers\\\", \\\"1\\\": \\\"Discretize\\\", \\\"2\\\": \\\"Detrend\\\", \\\"3\\\": \\\"ClipOutliers\\\", \\\"4\\\": \\\"Log\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 2, \\\"fillna\\\": null}, \\\"1\\\": {\\\"discretization\\\": \\\"lower\\\", \\\"n_bins\\\": 10}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}, \\\"3\\\": {\\\"method\\\": \\\"clip\\\", \\\"std_threshold\\\": 3, \\\"fillna\\\": null}, \\\"4\\\": {}}}\"}, \"6e3f3bff3404b65e00b85af8aaa10ccc\": {\"Model\": \"VARMAX\", \"ModelParameters\": \"{\\\"order\\\": [2, 0], \\\"trend\\\": \\\"n\\\"}\", \"TransformationParameters\": \"{\\\"fillna\\\": \\\"ffill_mean_biased\\\", \\\"transformations\\\": {\\\"0\\\": \\\"StandardScaler\\\", \\\"1\\\": \\\"SeasonalDifference\\\", \\\"2\\\": \\\"Detrend\\\"}, \\\"transformation_params\\\": {\\\"0\\\": {}, \\\"1\\\": {\\\"lag_1\\\": 12, \\\"method\\\": \\\"Mean\\\"}, \\\"2\\\": {\\\"model\\\": \\\"GLS\\\"}}}\"}}}", "TransformationParameters": "{}"}}, "series": {"v1": "3b86dff30168d950ba38ca940272e6ec", "v2": "0d6bd50098b3cf2ba038b3fd3193e30c"}}
# Make predictions for the next 12 periods (months for the univariate series)
uni_prediction = uni_model.predict(forecast_length=12)
multi_prediction = multi_model.predict(forecast_length=12)
# Split into point, upper and lower forecasts
# Get point forecast dataframes
uni_forecasts_point = uni_prediction.forecast
multi_forecasts_point = multi_prediction.forecast
# Get upper and lower forecasts
uni_forecasts_up, uni_forecasts_low = uni_prediction.upper_forecast, uni_prediction.lower_forecast
multi_forecasts_up, multi_forecasts_low = multi_prediction.upper_forecast, multi_prediction.lower_forecast
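Because the last 12 monthly observations were held out of the univariate training set, they can be compared against the point forecast as a quick sanity check. A minimal sketch reusing the dataframes above; it assumes the forecast dates line up with the held-out dates so the index-aligned subtraction is meaningful.
# Actual values for the 12 held-out months, indexed by date
uni_holdout = uni_data_df.iloc[-forecast_length:].copy()
uni_holdout.index = pd.to_datetime(uni_holdout['time'])
# Mean absolute error of the point forecast over the hold-out window
uni_mae = (uni_forecasts_point.iloc[:, 0] - uni_holdout['value']).abs().mean()
print('Univariate hold-out MAE: %.2f' % uni_mae)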
# Prepare original data for plotting
uni_data_df.index = pd.to_datetime(uni_data_df['time'])
uni_data_df = uni_data_df.drop(columns=['time', 'series_id'])
uni_data_df.columns = ['actual']
multi_data_df.columns = [
'v1_actual', 'v2_actual'
]
# Combine data into dataframes for plotting
uni_plot_df = uni_forecasts_point.reset_index()\
.merge(uni_forecasts_up.reset_index(), on='index')\
.merge(uni_forecasts_low.reset_index(), on='index')
uni_plot_df.columns = ['time', 'point', 'upper', 'lower']
uni_plot_df = uni_plot_df.set_index('time')
uni_plot_df = uni_data_df.merge(uni_plot_df, how='left', left_index=True, right_index=True)
multi_plot_df = multi_forecasts_point.reset_index()\
.merge(multi_forecasts_up.reset_index(), on='index')\
.merge(multi_forecasts_low.reset_index(), on='index')
multi_plot_df.columns = ['time',
'v1_point', 'v2_point',
'v1_upper', 'v2_upper',
'v1_lower', 'v2_lower'
]
multi_plot_df = multi_plot_df.set_index('time')
multi_plot_df = multi_data_df.merge(multi_plot_df, how='left', left_index=True, right_index=True)
/home/cylim/anaconda3/envs/experiment_lab/lib/python3.8/site-packages/statsmodels/base/model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals warnings.warn("Maximum Likelihood optimization failed to "
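The same univariate plotting frame can also be assembled a little more directly with pd.concat, avoiding the repeated reset_index/merge round-trip; a sketch producing an equivalent result from the dataframes defined above.
# Equivalent assembly of the univariate plotting dataframe
uni_plot_df = pd.concat(
    {
        'point': uni_forecasts_point.iloc[:, 0],
        'upper': uni_forecasts_up.iloc[:, 0],
        'lower': uni_forecasts_low.iloc[:, 0],
    },
    axis=1,
)
uni_plot_df = uni_data_df.join(uni_plot_df, how='left')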
# Plot predictions
uni_plot_df.plot()
multi_plot_df.plot()
<AxesSubplot:xlabel='time'>
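Since the upper and lower columns are the bounds of the model's prediction interval, the univariate forecast can also be drawn with a shaded band instead of three separate lines; a sketch using the columns of uni_plot_df built above.
# Shaded prediction-interval view of the univariate forecast
fig, ax = plt.subplots(figsize=(10, 4))
uni_plot_df['actual'].plot(ax=ax, label='actual')
uni_plot_df['point'].plot(ax=ax, label='point forecast')
ax.fill_between(
    uni_plot_df.index,
    uni_plot_df['lower'],
    uni_plot_df['upper'],
    where=uni_plot_df['point'].notna(),
    alpha=0.2,
    label='prediction interval',
)
ax.legend()
plt.show()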