diff --git a/execution/run_uc.py b/execution/run_uc.py
index e1db5f509db911e7ccaba3e73b2216f1b1c326a7..d62b117089d67c2f08a0096ea0879a36734d2fb8 100644
--- a/execution/run_uc.py
+++ b/execution/run_uc.py
@@ -287,8 +287,11 @@ def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prome
                           execution_minutes, prometheus_base_url,
                           result_path)
     except Exception as e:
-        print('Evaluation function failed')
-        logging.exception('Evaluation function failed')
+        err_msg = 'Evaluation function failed'
+        print(err_msg)
+        logging.exception(err_msg)
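+        # Swallow the exception: the failure is logged above and the benchmark run continues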
+        print('Benchmark continues')
 
     return
 
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
index 870ec0cd7be41a939f99eb00b81fde95e059dd47..442a7999cc0c50257f27271e3c41e6c85c0cecee 100644
--- a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
+++ b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
@@ -13,8 +13,11 @@ def execute(config):
     try:
         trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC, THRESHOLD)
     except Exception as e:
-        print('Computing trend slope failed.')
-        logging.exception('Computing trend slope failed.')
+        err_msg = 'Computing trend slope failed'
+        print(err_msg)
+        logging.exception(err_msg)
+        print('Marking this subexperiment as not successful and continuing the benchmark')
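+        # Returning 0 marks this subexperiment as not successful so the benchmark can continue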
         return 0