Commit 9e65a16

removed commented code
1 parent 577cb23 commit 9e65a16

File tree

1 file changed (+0, -142 lines changed)

ads/opctl/operator/lowcode/forecast/model/neuralprophet.py

Lines changed: 0 additions & 142 deletions

@@ -240,148 +240,6 @@ def _build_model(self) -> pd.DataFrame:
 
         self.models = [self.models_dict[target] for target in self.target_columns]
 
-        # # Extract the Confidence Interval Width and
-        # # convert to neural prophets equivalent - quantiles
-        # model_kwargs = self.spec.model_kwargs
-
-        # if self.spec.confidence_interval_width is None:
-        #     quantiles = model_kwargs.get("quantiles", [0.05, 0.95])
-        #     self.spec.confidence_interval_width = float(quantiles[1]) - float(
-        #         quantiles[0]
-        #     )
-        # else:
-        #     boundaries = round((1 - self.spec.confidence_interval_width) / 2, 2)
-        #     quantiles = [boundaries, self.spec.confidence_interval_width + boundaries]
-
-        # model_kwargs["quantiles"] = quantiles
-        # self.forecast_output = ForecastOutput(
-        #     confidence_interval_width=self.spec.confidence_interval_width
-        # )
-
-        # for i, (target, df) in enumerate(full_data_dict.items()):
-        #     le, df_encoded = utils._label_encode_dataframe(
-        #         df, no_encode={self.spec.datetime_column.name, target}
-        #     )
-        #     model_kwargs_i = model_kwargs.copy()
-
-        #     # format the dataframe for this target. Dropping NA on target[df] will remove all future data
-        #     df_clean = self._preprocess(
-        #         df_encoded,
-        #         self.spec.datetime_column.name,
-        #         self.spec.datetime_column.format,
-        #     )
-        #     data_i = df_clean[df_clean[target].notna()]
-        #     data_i.rename({target: "y"}, axis=1, inplace=True)
-
-        #     # Assume that all columns passed in should be used as additional data
-        #     additional_regressors = set(data_i.columns) - {"y", "ds"}
-        #     training_data = data_i[["y", "ds"] + list(additional_regressors)]
-
-        #     if self.perform_tuning:
-
-        #         def objective(trial):
-        #             params = {
-        #                 # 'seasonality_mode': trial.suggest_categorical('seasonality_mode', ['additive', 'multiplicative']),
-        #                 # 'seasonality_reg': trial.suggest_float('seasonality_reg', 0.1, 500, log=True),
-        #                 # 'learning_rate': trial.suggest_float('learning_rate', 0.0001, 0.1, log=True),
-        #                 "newer_samples_start": trial.suggest_float(
-        #                     "newer_samples_start", 0.001, 0.999
-        #                 ),
-        #                 "newer_samples_weight": trial.suggest_float(
-        #                     "newer_samples_weight", 0, 100
-        #                 ),
-        #                 "changepoints_range": trial.suggest_float(
-        #                     "changepoints_range", 0.8, 0.95
-        #                 ),
-        #             }
-        #             # trend_reg, trend_reg_threshold, ar_reg, impute_rolling/impute_linear,
-        #             params.update(model_kwargs_i)
-
-        #             folds = NeuralProphet(**params).crossvalidation_split_df(
-        #                 data_i, k=3
-        #             )
-        #             test_metrics_total_i = []
-        #             for df_train, df_test in folds:
-        #                 m, accepted_regressors = _fit_model(
-        #                     data=df_train,
-        #                     params=params,
-        #                     additional_regressors=additional_regressors,
-        #                     select_metric=self.spec.metric,
-        #                 )
-        #                 df_test = df_test[["y", "ds"] + accepted_regressors]
-
-        #                 test_forecast_i = m.predict(df=df_test)
-        #                 fold_metric_i = (
-        #                     m.metrics[self.spec.metric]
-        #                     .forward(
-        #                         Tensor(test_forecast_i["yhat1"]),
-        #                         Tensor(test_forecast_i["y"]),
-        #                     )
-        #                     .item()
-        #                 )
-        #                 test_metrics_total_i.append(fold_metric_i)
-        #             logger.debug(
-        #                 f"----------------------{np.asarray(test_metrics_total_i).mean()}----------------------"
-        #             )
-        #             return np.asarray(test_metrics_total_i).mean()
-
-        #         study = optuna.create_study(direction="minimize")
-        #         m_params = NeuralProphet().parameters()
-        #         study.enqueue_trial(
-        #             {
-        #                 # 'seasonality_mode': m_params['seasonality_mode'],
-        #                 # 'seasonality_reg': m_params['seasonality_reg'],
-        #                 # 'learning_rate': m_params['learning_rate'],
-        #                 "newer_samples_start": m_params["newer_samples_start"],
-        #                 "newer_samples_weight": m_params["newer_samples_weight"],
-        #                 "changepoints_range": m_params["changepoints_range"],
-        #             }
-        #         )
-        #         study.optimize(
-        #             objective,
-        #             n_trials=self.spec.tuning.n_trials
-        #             if self.spec.tuning
-        #             else DEFAULT_TRIALS,
-        #             n_jobs=-1,
-        #         )
-
-        #         selected_params = study.best_params
-        #         selected_params.update(model_kwargs_i)
-        #         model_kwargs_i = selected_params
-
-        #     # Build and fit model
-        #     model, accepted_regressors = _fit_model(
-        #         data=training_data,
-        #         params=model_kwargs_i,
-        #         additional_regressors=additional_regressors,
-        #         select_metric=self.spec.metric,
-        #     )
-        #     logger.debug(
-        #         f"Found the following additional data columns: {additional_regressors}"
-        #     )
-        #     logger.debug(
-        #         f"While fitting the model, some additional data may have been "
-        #         f"discarded. Only using the columns: {accepted_regressors}"
-        #     )
-
-        #     # Build future dataframe
-        #     future = df_clean.reset_index(drop=True)
-        #     future["y"] = None
-        #     future = future[["y", "ds"] + list(accepted_regressors)]
-
-        #     # Forecast model and collect outputs
-        #     forecast = model.predict(future)
-        #     logger.debug(f"-----------------Model {i}----------------------")
-        #     logger.debug(forecast.tail())
-        #     models.append(model)
-        #     outputs[target] = forecast
-        #     outputs_legacy.append(forecast)
-
-        # self.models = models
-        # self.outputs = outputs_legacy
-
-        # logger.debug("===========Done===========")
-
         # Merge the outputs from each model into 1 df with all outputs by target and category
         col = self.original_target_column
         output_col = pd.DataFrame()
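
For reference, the dead block removed here did two things before it was commented out: it translated the operator's confidence_interval_width setting into the quantile pair NeuralProphet expects, and it fit one model per target series (with optional Optuna tuning) before forecasting a future frame. Below is a minimal, self-contained sketch of the interval-to-quantiles conversion; the standalone function name and signature are illustrative, while the arithmetic mirrors the deleted lines.

def interval_to_quantiles(confidence_interval_width=None, model_kwargs=None):
    # Mirrors the deleted conversion: either derive the interval width from
    # explicit quantiles, or split the leftover probability mass into two tails.
    model_kwargs = dict(model_kwargs or {})
    if confidence_interval_width is None:
        quantiles = model_kwargs.get("quantiles", [0.05, 0.95])
        confidence_interval_width = float(quantiles[1]) - float(quantiles[0])
    else:
        boundary = round((1 - confidence_interval_width) / 2, 2)
        quantiles = [boundary, confidence_interval_width + boundary]
    model_kwargs["quantiles"] = quantiles
    return confidence_interval_width, model_kwargs

# A 0.95 interval maps (after the two-decimal rounding above) to roughly
# the 0.03 and 0.98 quantiles.
print(interval_to_quantiles(0.95))

The per-target loop in the deleted block delegated fitting to a private _fit_model helper; a plain-NeuralProphet sketch of that fit/forecast pattern, with a synthetic series standing in for the operator's per-target dataframes, might look like this:

import pandas as pd
from neuralprophet import NeuralProphet

# Toy series in NeuralProphet's expected "ds"/"y" layout (column names as in the deleted code).
df = pd.DataFrame(
    {"ds": pd.date_range("2023-01-01", periods=90, freq="D"), "y": range(90)}
)
model = NeuralProphet(quantiles=[0.05, 0.95])  # quantile pair as produced by the conversion above
model.fit(df, freq="D")
future = model.make_future_dataframe(df, periods=14)
forecast = model.predict(future)
print(forecast[["ds", "yhat1"]].tail())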
