I'm trying to forecast a time series using the Prophet model in Python, for which I would like to find the optimal tuning parameters (such as changepoint_range, changepoint_prior_scale, seasonality_prior_scale, holidays_prior_scale, holidays_mode) by minimising the error (MAPE) on a train/test validation split, and then predict the future.
def mape(y_true, y_pred):
    """Mean absolute percentage error of *y_pred* vs *y_true*, rounded to 2 decimals."""
    pct_errors = np.abs((y_true - y_pred) / y_true) * 100
    return round(np.mean(pct_errors), 2)
Currently, I am using the Optuna and Mango Tuner libraries, but they only take a fixed number of evaluations into account (50 trials for Optuna; 10 random samples plus 50 iterations for Mango), whereas I would like all parameter variations to be considered so as to find the optimal tuning.
def objective(trial):
    """Optuna objective: fit Prophet with the sampled hyperparameters and
    return the MAPE of the forecast over the Test window.

    Relies on module-level Train / Test DataFrames, the horizon `tst`,
    and the `mape` helper.
    """
    params = {
        # suggest_float(..., log=True) replaces the deprecated suggest_loguniform
        'changepoint_range': trial.suggest_float('changepoint_range', 0.1, 1, log=True),
        'changepoint_prior_scale': trial.suggest_float('changepoint_prior_scale', 0.01, 0.5, log=True),
        'seasonality_prior_scale': trial.suggest_float('seasonality_prior_scale', 0.01, 10, log=True),
        'holidays_prior_scale': trial.suggest_float('holidays_prior_scale', 0.01, 10, log=True),
        'holidays_mode': trial.suggest_categorical('holidays_mode', ['additive', 'multiplicative']),
    }
    # BUG FIX: the sampled params were never passed to Prophet, so every
    # trial fitted an identical default model and the search was a no-op.
    # The dict keys match Prophet's constructor arguments, so forward them.
    model = Prophet(**params)
    model.fit(Train)
    future = model.make_future_dataframe(periods=tst)
    forecast = model.predict(future)
    prediction = forecast.tail(tst)
    return mape(Test['y'], prediction['yhat'])

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
def objective_function(args_list):
    """Batch objective for the Mango Tuner.

    Evaluates each hyperparameter dict in *args_list* by fitting Prophet on
    Train and scoring MAPE on Test; returns the parallel lists
    (params_evaluated, results) that Mango expects.
    """
    global Train, Test
    params_evaluated = []
    results = []
    for params in args_list:
        try:
            # BUG FIX: pass the candidate params to Prophet; previously a
            # default model was fitted regardless of the sampled values.
            model = Prophet(**params)
            model.fit(Train)
            future = model.make_future_dataframe(periods=tst)
            forecast = model.predict(future)
            prediction = forecast.tail(tst)
            error = mape(Test['y'], prediction['yhat'])
            params_evaluated.append(params)
            results.append(error)
        except Exception as e:
            print(f"Error for params {params}: {e}")
            params_evaluated.append(params)
            # Penalty MAPE so the tuner steers away from failing configs
            results.append(25)
    return params_evaluated, results
from scipy.stats import uniform
# NOTE: scipy's uniform(loc, scale) samples from [loc, loc + scale],
# so uniform(0.5, 0.5) draws changepoint_range from [0.5, 1.0] —
# a narrower range than the (0.1, 1) used in the optuna objective above.
params_space= dict(changepoint_range= uniform(0.5,0.5),
changepoint_prior_scale= uniform(0.001,0.5),
seasonality_prior_scale= uniform(0.01,10),
holidays_prior_scale= uniform(0.01, 10),
holidays_mode= ['additive', 'multiplicative']
)
# Mango Tuner configuration: 10 random warm-up samples, then 50
# guided (Bayesian) optimisation iterations.
conf_dict= dict()
conf_dict['initial_random']= 10
conf_dict['num_iteration']= 50
# Runs objective_function over candidate param dicts and minimises MAPE.
tuner= Tuner(params_space, objective_function, conf_dict)
results= tuner.minimize()
I'm trying to forecast a time series using the Prophet model in Python, for which I would like to find the optimal tuning parameters (such as changepoint_range, changepoint_prior_scale, seasonality_prior_scale, holidays_prior_scale, holidays_mode) by minimising the error (MAPE) on a train/test validation split, and then predict the future.
def mape(y_true, y_pred):
    """Return the mean absolute percentage error, rounded to two decimal places."""
    ratio = (y_true - y_pred) / y_true
    return round(np.mean(np.abs(ratio * 100)), 2)
Currently, I am using the Optuna and Mango Tuner libraries, but they only take a fixed number of evaluations into account (50 trials for Optuna; 10 random samples plus 50 iterations for Mango), whereas I would like all parameter variations to be considered so as to find the optimal tuning.
def objective(trial):
    """Optuna objective: fit Prophet with the sampled hyperparameters and
    return the MAPE of the forecast over the Test window.

    Relies on module-level Train / Test DataFrames, the horizon `tst`,
    and the `mape` helper.
    """
    params = {
        # suggest_float(..., log=True) replaces the deprecated suggest_loguniform
        'changepoint_range': trial.suggest_float('changepoint_range', 0.1, 1, log=True),
        'changepoint_prior_scale': trial.suggest_float('changepoint_prior_scale', 0.01, 0.5, log=True),
        'seasonality_prior_scale': trial.suggest_float('seasonality_prior_scale', 0.01, 10, log=True),
        'holidays_prior_scale': trial.suggest_float('holidays_prior_scale', 0.01, 10, log=True),
        'holidays_mode': trial.suggest_categorical('holidays_mode', ['additive', 'multiplicative']),
    }
    # BUG FIX: the sampled params were never passed to Prophet, so every
    # trial fitted an identical default model and the search was a no-op.
    # The dict keys match Prophet's constructor arguments, so forward them.
    model = Prophet(**params)
    model.fit(Train)
    future = model.make_future_dataframe(periods=tst)
    forecast = model.predict(future)
    prediction = forecast.tail(tst)
    return mape(Test['y'], prediction['yhat'])

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
def objective_function(args_list):
    """Batch objective for the Mango Tuner.

    Evaluates each hyperparameter dict in *args_list* by fitting Prophet on
    Train and scoring MAPE on Test; returns the parallel lists
    (params_evaluated, results) that Mango expects.
    """
    global Train, Test
    params_evaluated = []
    results = []
    for params in args_list:
        try:
            # BUG FIX: pass the candidate params to Prophet; previously a
            # default model was fitted regardless of the sampled values.
            model = Prophet(**params)
            model.fit(Train)
            future = model.make_future_dataframe(periods=tst)
            forecast = model.predict(future)
            prediction = forecast.tail(tst)
            error = mape(Test['y'], prediction['yhat'])
            params_evaluated.append(params)
            results.append(error)
        except Exception as e:
            print(f"Error for params {params}: {e}")
            params_evaluated.append(params)
            # Penalty MAPE so the tuner steers away from failing configs
            results.append(25)
    return params_evaluated, results
from scipy.stats import uniform
# NOTE: scipy's uniform(loc, scale) samples from [loc, loc + scale],
# so uniform(0.5, 0.5) draws changepoint_range from [0.5, 1.0] —
# a narrower range than the (0.1, 1) used in the optuna objective above.
params_space= dict(changepoint_range= uniform(0.5,0.5),
changepoint_prior_scale= uniform(0.001,0.5),
seasonality_prior_scale= uniform(0.01,10),
holidays_prior_scale= uniform(0.01, 10),
holidays_mode= ['additive', 'multiplicative']
)
# Mango Tuner configuration: 10 random warm-up samples, then 50
# guided (Bayesian) optimisation iterations.
conf_dict= dict()
conf_dict['initial_random']= 10
conf_dict['num_iteration']= 50
# Runs objective_function over candidate param dicts and minimises MAPE.
tuner= Tuner(params_space, objective_function, conf_dict)
results= tuner.minimize()
Share
Improve this question
asked Apr 1 at 10:28
Arun Raaj Rajendhiran
11 bronze badge
New contributor
Arun Raaj Rajendhiran is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.
1 Answer
You need to ensure you are passing your parameter space to the model for tuning. I'm not sure what you mean by considering 'all parameter variations' — exhaustively searching every variation is generally not feasible. Searching with Optuna samples between the specified lower and upper bounds from a log-uniform distribution. I've included a sample dataframe so you can see how the parameters are selected.
import optuna
from prophet import Prophet
import pandas as pd
from sklearn.metrics import mean_absolute_percentage_error as mape
# Synthetic daily series: 100 points with a deterministic linear trend.
dates = pd.date_range(start='2022-01-01', periods=100, freq='D')
values = [i + (i * 0.1) for i in range(100)]
df = pd.DataFrame({'ds': dates, 'y': values})

# Hold out the last 20 observations for validation.
Train = df.iloc[:80]
Test = df.iloc[80:]
fh = len(Test)  # forecast horizon = size of the test window
def objective(trial):
    """Optuna objective: sample Prophet hyperparameters, fit on Train,
    and return the MAPE of the forecast over the Test window."""
    params = {
        'changepoint_range': trial.suggest_float('changepoint_range', 0.1, 1, log=True),
        'changepoint_prior_scale': trial.suggest_float('changepoint_prior_scale', 0.01, 0.5, log=True),
        'seasonality_prior_scale': trial.suggest_float('seasonality_prior_scale', 0.01, 10, log=True),
        'holidays_prior_scale': trial.suggest_float('holidays_prior_scale', 0.01, 10, log=True),
        'holidays_mode': trial.suggest_categorical('holidays_mode', ['additive', 'multiplicative']),
    }

    # The sampled keys match Prophet's constructor arguments, so the whole
    # dict can be forwarded directly — this is what makes the tuning work.
    model = Prophet(**params)
    model.fit(Train)

    future = model.make_future_dataframe(periods=fh)
    forecast = model.predict(future)
    holdout = forecast.tail(fh)

    # Using sklearn's metric here, but a custom mape function works as well.
    return mape(Test['y'], holdout['yhat'])
# Create a study and optimize the objective function,
# searching for the hyperparameter set that minimises the returned MAPE.
study = optuna.create_study(direction='minimize')
# Setting a low n_trials for this example; increase (e.g. 50+) for a real search.
study.optimize(objective, n_trials=5)
# Best trial's sampled parameters and its (minimum) MAPE.
print("Best parameters: ", study.best_params)
print("Best MAPE: ", study.best_value)