This is an automated email from the ASF dual-hosted git repository.

cdionysio pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/systemds.git


The following commit(s) were added to refs/heads/main by this push:
     new 608ddcdcb6 [SYSTEMDS-3937] Access measures in optimizers correctly
608ddcdcb6 is described below

commit 608ddcdcb62209f87a94d6ac049b11ef97728231
Author: Christina Dionysio <[email protected]>
AuthorDate: Mon Dec 8 11:35:48 2025 +0100

    [SYSTEMDS-3937] Access measures in optimizers correctly
    
    This patch fixes errors in how the optimizers access the performance
    measures and ensures that the ranking functionality executes correctly.
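    
    As a minimal sketch of the corrected access pattern (illustrative only:
    "scores", "task_results", and "k" are placeholder names here, while the
    average_scores accessor and the rank_by_tradeoff signature are the ones
    introduced by this patch):
    
        # scores holds the train/val/test PerformanceMeasure objects, in
        # that order, so the averaged metrics are read via average_scores:
        train_avg = scores[0].average_scores  # e.g. {"accuracy": 0.91}
    
        # rank_by_tradeoff now takes the metric name and returns both the
        # sorted entries and their original indices:
        results, sorted_indices = rank_by_tradeoff(
            task_results, performance_metric_name="accuracy"
        )
        top_k, top_k_indices = results[:k], sorted_indices[:k]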
---
 .../scuro/drsearch/hyperparameter_tuner.py         |   4 +-
 .../scuro/drsearch/multimodal_optimizer.py         |  18 ++-
 src/main/python/systemds/scuro/drsearch/ranking.py |  25 +++--
 src/main/python/systemds/scuro/drsearch/task.py    | 123 ++++++++++++---------
 .../systemds/scuro/drsearch/unimodal_optimizer.py  |  33 ++++--
 .../python/systemds/scuro/modality/modality.py     |   2 +
 src/main/python/systemds/scuro/modality/type.py    |   4 +-
 .../scuro/representations/concatenation.py         |   2 +-
 .../systemds/scuro/representations/fusion.py       |  35 ++----
 .../representations/timeseries_representations.py  |   2 +-
 src/main/python/tests/scuro/data_generator.py      |   8 +-
 .../python/tests/scuro/test_multimodal_fusion.py   |  22 ++--
 .../python/tests/scuro/test_unimodal_optimizer.py  |   4 +-
 .../tests/scuro/test_unimodal_representations.py   |   1 -
 14 files changed, 157 insertions(+), 126 deletions(-)

diff --git a/src/main/python/systemds/scuro/drsearch/hyperparameter_tuner.py b/src/main/python/systemds/scuro/drsearch/hyperparameter_tuner.py
index 8c5e4c24e1..a90485dfa2 100644
--- a/src/main/python/systemds/scuro/drsearch/hyperparameter_tuner.py
+++ b/src/main/python/systemds/scuro/drsearch/hyperparameter_tuner.py
@@ -103,7 +103,9 @@ class HyperparameterTuner:
             representations[task.model.name] = {}
             for modality in self.modalities:
                 k_best_results, cached_data = (
-                    self.optimization_results.get_k_best_results(modality, self.k, task)
+                    self.optimization_results.get_k_best_results(
+                        modality, self.k, task, self.scoring_metric
+                    )
                 )
                representations[task.model.name][modality.modality_id] = k_best_results
                self.k_best_representations[task.model.name].extend(k_best_results)
diff --git a/src/main/python/systemds/scuro/drsearch/multimodal_optimizer.py b/src/main/python/systemds/scuro/drsearch/multimodal_optimizer.py
index 9d0088a976..76831f6aae 100644
--- a/src/main/python/systemds/scuro/drsearch/multimodal_optimizer.py
+++ b/src/main/python/systemds/scuro/drsearch/multimodal_optimizer.py
@@ -19,6 +19,7 @@
 #
 # -------------------------------------------------------------
 import os
+import torch
 import multiprocessing as mp
 import itertools
 import threading
@@ -83,8 +84,9 @@ def _evaluate_dag_worker(dag_pickle, task_pickle, modalities_pickle, debug=False
 
         return OptimizationResult(
             dag=dag_copy,
-            train_score=scores[0],
-            val_score=scores[1],
+            train_score=scores[0].average_scores,
+            val_score=scores[1].average_scores,
+            test_score=scores[2].average_scores,
             runtime=total_time,
             task_name=task_copy.model.name,
             task_time=eval_time,
@@ -106,6 +108,7 @@ class MultimodalOptimizer:
         debug: bool = True,
         min_modalities: int = 2,
         max_modalities: int = None,
+        metric: str = "accuracy",
     ):
         self.modalities = modalities
         self.tasks = tasks
@@ -116,6 +119,7 @@ class MultimodalOptimizer:
 
         self.operator_registry = Registry()
         self.fusion_operators = self.operator_registry.get_fusion_operators()
+        self.metric_name = metric
 
         self.k_best_representations = self._extract_k_best_representations(
             unimodal_optimization_results
@@ -242,7 +246,7 @@ class MultimodalOptimizer:
             for modality in self.modalities:
                 k_best_results, cached_data = (
                     unimodal_optimization_results.get_k_best_results(
-                        modality, self.k, task
+                        modality, self.k, task, self.metric_name
                     )
                 )
 
@@ -367,6 +371,8 @@ class MultimodalOptimizer:
                 task_copy,
             )
 
+            torch.cuda.empty_cache()
+
             if fused_representation is None:
                 return None
 
@@ -388,8 +394,9 @@ class MultimodalOptimizer:
 
             return OptimizationResult(
                 dag=dag_copy,
-                train_score=scores[0],
-                val_score=scores[1],
+                train_score=scores[0].average_scores,
+                val_score=scores[1].average_scores,
+                test_score=scores[2].average_scores,
                 runtime=total_time,
                 representation_time=total_time - eval_time,
                 task_name=task_copy.model.name,
@@ -479,6 +486,7 @@ class OptimizationResult:
     dag: RepresentationDag
     train_score: PerformanceMeasure = None
     val_score: PerformanceMeasure = None
+    test_score: PerformanceMeasure = None
     runtime: float = 0.0
     task_time: float = 0.0
     representation_time: float = 0.0
diff --git a/src/main/python/systemds/scuro/drsearch/ranking.py b/src/main/python/systemds/scuro/drsearch/ranking.py
index 831a059eb8..b4b8a392ea 100644
--- a/src/main/python/systemds/scuro/drsearch/ranking.py
+++ b/src/main/python/systemds/scuro/drsearch/ranking.py
@@ -19,8 +19,7 @@
 #
 # -------------------------------------------------------------
 
-from dataclasses import replace
-from typing import Callable, Iterable, List, Optional
+from typing import Callable, Iterable, Optional
 
 
 def rank_by_tradeoff(
@@ -31,7 +30,7 @@ def rank_by_tradeoff(
     runtime_accessor: Optional[Callable[[object], float]] = None,
     cache_scores: bool = True,
     score_attr: str = "tradeoff_score",
-) -> List:
+):
     entries = list(entries)
     if not entries:
         return []
@@ -39,6 +38,7 @@ def rank_by_tradeoff(
     performance_score_accessor = lambda entry: getattr(entry, "val_score")[
         performance_metric_name
     ]
+
     if runtime_accessor is None:
 
         def runtime_accessor(entry):
@@ -77,14 +77,17 @@ def rank_by_tradeoff(
     if cache_scores:
         for entry, score in zip(entries, scores):
             if hasattr(entry, score_attr):
-                try:
-                    new_entry = replace(entry, **{score_attr: score})
-                    entries[entries.index(entry)] = new_entry
-                except TypeError:
-                    setattr(entry, score_attr, score)
+                setattr(entry, score_attr, score)
             else:
                 setattr(entry, score_attr, score)
 
-    return sorted(
-        entries, key=lambda entry: getattr(entry, score_attr, 0.0), reverse=True
-    )
+    sorted_entries = sorted(entries, key=lambda e: e.tradeoff_score, reverse=True)
+
+    sorted_indices = [
+        i
+        for i, _ in sorted(
+            enumerate(entries), key=lambda pair: pair[1].tradeoff_score, reverse=True
+        )
+    ]
+
+    return sorted_entries, sorted_indices
diff --git a/src/main/python/systemds/scuro/drsearch/task.py b/src/main/python/systemds/scuro/drsearch/task.py
index bfd1f16ab3..fbe08bcc61 100644
--- a/src/main/python/systemds/scuro/drsearch/task.py
+++ b/src/main/python/systemds/scuro/drsearch/task.py
@@ -20,12 +20,10 @@
 # -------------------------------------------------------------
 import copy
 import time
-from typing import List, Union
-from systemds.scuro.modality.modality import Modality
-from systemds.scuro.representations.representation import Representation
+from typing import List
 from systemds.scuro.models.model import Model
 import numpy as np
-from sklearn.model_selection import KFold
+from sklearn.model_selection import train_test_split
 
 
 class PerformanceMeasure:
@@ -69,7 +67,8 @@ class Task:
         val_indices: List,
         kfold=5,
         measure_performance=True,
-        performance_measures="accuracy",
+        performance_measures=["accuracy"],
+        fusion_train_split=0.8,
     ):
         """
         Parent class for the prediction task that is performed on top of the aligned representation
@@ -85,7 +84,7 @@ class Task:
         self.model = model
         self.labels = labels
         self.train_indices = train_indices
-        self.val_indices = val_indices
+        self.test_indices = val_indices
         self.kfold = kfold
         self.measure_performance = measure_performance
         self.inference_time = []
@@ -94,6 +93,47 @@ class Task:
         self.performance_measures = performance_measures
         self.train_scores = PerformanceMeasure("train", performance_measures)
         self.val_scores = PerformanceMeasure("val", performance_measures)
+        self.test_scores = PerformanceMeasure("test", performance_measures)
+        self.fusion_train_indices = None
+        self._create_cv_splits()
+
+    def _create_cv_splits(self):
+        train_labels = [self.labels[i] for i in self.train_indices]
+        train_labels_array = np.array(train_labels)
+
+        train_indices_array = np.array(self.train_indices)
+
+        self.cv_train_indices = []
+        self.cv_val_indices = []
+
+        for fold_idx in range(self.kfold):
+            fold_train_indices_array, fold_val_indices_array, _, _ = train_test_split(
+                train_indices_array,
+                train_labels_array,
+                test_size=0.2,
+                shuffle=True,
+                random_state=11 + fold_idx,
+            )
+
+            fold_train_indices = fold_train_indices_array.tolist()
+            fold_val_indices = fold_val_indices_array.tolist()
+
+            self.cv_train_indices.append(fold_train_indices)
+            self.cv_val_indices.append(fold_val_indices)
+
+            overlap = set(fold_train_indices) & set(fold_val_indices)
+            if overlap:
+                raise ValueError(
+                    f"Fold {fold_idx}: Overlap detected between train and val 
indices: {overlap}"
+                )
+
+        all_val_indices = set()
+        for val_indices in self.cv_val_indices:
+            all_val_indices.update(val_indices)
+
+        self.fusion_train_indices = [
+            idx for idx in self.train_indices if idx not in all_val_indices
+        ]
 
     def create_model(self):
         """
@@ -107,12 +147,12 @@ class Task:
     def get_train_test_split(self, data):
         X_train = [data[i] for i in self.train_indices]
         y_train = [self.labels[i] for i in self.train_indices]
-        if self.val_indices is None:
+        if self.test_indices is None:
             X_test = None
             y_test = None
         else:
-            X_test = [data[i] for i in self.val_indices]
-            y_test = [self.labels[i] for i in self.val_indices]
+            X_test = [data[i] for i in self.test_indices]
+            y_test = [self.labels[i] for i in self.test_indices]
 
         return X_train, y_train, X_test, y_test
 
@@ -125,22 +165,25 @@ class Task:
         """
         self._reset_params()
         model = self.create_model()
-        skf = KFold(n_splits=self.kfold, shuffle=True, random_state=11)
 
-        fold = 0
-        X, y, _, _ = self.get_train_test_split(data)
+        test_X = np.array([data[i] for i in self.test_indices])
+        test_y = np.array([self.labels[i] for i in self.test_indices])
+
+        for fold_idx in range(self.kfold):
+            fold_train_indices = self.cv_train_indices[fold_idx]
+            fold_val_indices = self.cv_val_indices[fold_idx]
 
-        for train, test in skf.split(X, y):
-            train_X = np.array(X)[train]
-            train_y = np.array(y)[train]
-            test_X = np.array(X)[test]
-            test_y = np.array(y)[test]
-            self._run_fold(model, train_X, train_y, test_X, test_y)
-            fold += 1
+            train_X = np.array([data[i] for i in fold_train_indices])
+            train_y = np.array([self.labels[i] for i in fold_train_indices])
+            val_X = np.array([data[i] for i in fold_val_indices])
+            val_y = np.array([self.labels[i] for i in fold_val_indices])
+
+            self._run_fold(model, train_X, train_y, val_X, val_y, test_X, test_y)
 
         return [
             self.train_scores.compute_averages(),
             self.val_scores.compute_averages(),
+            self.test_scores.compute_averages(),
         ]
 
     def _reset_params(self):
@@ -148,48 +191,18 @@ class Task:
         self.training_time = []
         self.train_scores = PerformanceMeasure("train", 
self.performance_measures)
         self.val_scores = PerformanceMeasure("val", self.performance_measures)
+        self.test_scores = PerformanceMeasure("test", 
self.performance_measures)
 
-    def _run_fold(self, model, train_X, train_y, test_X, test_y):
+    def _run_fold(self, model, train_X, train_y, val_X, val_y, test_X, test_y):
         train_start = time.time()
-        train_score = model.fit(train_X, train_y, test_X, test_y)
+        train_score = model.fit(train_X, train_y, val_X, val_y)
         train_end = time.time()
         self.training_time.append(train_end - train_start)
         self.train_scores.add_scores(train_score[0])
+        val_score = model.test(val_X, val_y)
         test_start = time.time()
         test_score = model.test(np.array(test_X), test_y)
         test_end = time.time()
         self.inference_time.append(test_end - test_start)
-        self.val_scores.add_scores(test_score[0])
-
-    def create_representation_and_run(
-        self,
-        representation: Representation,
-        modalities: Union[List[Modality], Modality],
-    ):
-        self._reset_params()
-        skf = KFold(n_splits=self.kfold, shuffle=True, random_state=11)
-
-        fold = 0
-        X, y, _, _ = self.get_train_test_split(data)
-
-        for train, test in skf.split(X, y):
-            train_X = np.array(X)[train]
-            train_y = np.array(y)[train]
-            test_X = s.transform(np.array(X)[test])
-            test_y = np.array(y)[test]
-
-            if isinstance(modalities, Modality):
-                rep = modality.apply_representation(representation())
-            else:
-                representation().transform(
-                    train_X, train_y
-                )  # TODO: think about a way how to handle masks
-
-            self._run_fold(train_X, train_y, test_X, test_y)
-            fold += 1
-
-        if self.measure_performance:
-            self.inference_time = np.mean(self.inference_time)
-            self.training_time = np.mean(self.training_time)
-
-        return [np.mean(train_scores), np.mean(test_scores)]
+        self.val_scores.add_scores(val_score[0])
+        self.test_scores.add_scores(test_score[0])
diff --git a/src/main/python/systemds/scuro/drsearch/unimodal_optimizer.py b/src/main/python/systemds/scuro/drsearch/unimodal_optimizer.py
index 7735986c2e..1a348a91df 100644
--- a/src/main/python/systemds/scuro/drsearch/unimodal_optimizer.py
+++ b/src/main/python/systemds/scuro/drsearch/unimodal_optimizer.py
@@ -54,7 +54,6 @@ class UnimodalOptimizer:
     ):
         self.modalities = modalities
         self.tasks = tasks
-        self.run = None
         self.save_all_results = save_all_results
         self.result_path = result_path
 
@@ -65,7 +64,7 @@ class UnimodalOptimizer:
         self.debug = debug
 
         self.operator_registry = Registry()
-        self.operator_performance = UnimodalResults(modalities, tasks, debug, self.run)
+        self.operator_performance = UnimodalResults(modalities, tasks, debug, True)
 
         self._tasks_require_same_dims = True
         self.expected_dimensions = tasks[0].expected_dim
@@ -102,6 +101,17 @@ class UnimodalOptimizer:
         with open(file_name, "wb") as f:
             pickle.dump(self.operator_performance.results, f)
 
+    def store_cache(self, file_name=None):
+        if file_name is None:
+            import time
+
+            timestr = time.strftime("%Y%m%d-%H%M%S")
+            file_name = "unimodal_optimizer_cache" + timestr + ".pkl"
+
+        file_name = f"{self.result_path}/{file_name}"
+        with open(file_name, "wb") as f:
+            pickle.dump(self.operator_performance.cache, f)
+
     def load_results(self, file_name):
         with open(file_name, "rb") as f:
             self.operator_performance.results = pickle.load(f)
@@ -390,6 +400,7 @@ class UnimodalResults:
         entry = ResultEntry(
             train_score=scores[0].average_scores,
             val_score=scores[1].average_scores,
+            test_score=scores[2].average_scores,
             representation_time=modality.transform_time,
             task_time=task_time,
             combination=combination.name if combination else "",
@@ -414,22 +425,23 @@ class UnimodalResults:
                 for entry in self.results[modality][task_name]:
                     print(f"{modality}_{task_name}: {entry}")
 
-    def get_k_best_results(self, modality, k, task):
+    def get_k_best_results(self, modality, k, task, performance_metric_name):
         """
         Get the k best results for the given modality
         :param modality: modality to get the best results for
         :param k: number of best results
+        :param task: task to get the best results for
+        :param performance_metric_name: name of the performance metric to use for ranking
         """
 
         task_results = self.results[modality.modality_id][task.model.name]
 
-        results = rank_by_tradeoff(task_results)[:k]
+        results, sorted_indices = rank_by_tradeoff(
+            task_results, performance_metric_name=performance_metric_name
+        )
 
-        sorted_indices = sorted(
-            range(len(task_results)),
-            key=lambda x: task_results[x].tradeoff_score,
-            reverse=True,
-        )[:k]
+        results = results[:k]
+        sorted_indices = sorted_indices[:k]
 
         task_cache = self.cache.get(modality.modality_id, {}).get(task.model.name, None)
         if not task_cache:
@@ -446,10 +458,11 @@ class UnimodalResults:
         return results, cache
 
 
-@dataclass(frozen=True)
+@dataclass
 class ResultEntry:
     val_score: PerformanceMeasure = None
     train_score: PerformanceMeasure = None
+    test_score: PerformanceMeasure = None
     representation_time: float = 0.0
     task_time: float = 0.0
     combination: str = ""
diff --git a/src/main/python/systemds/scuro/modality/modality.py b/src/main/python/systemds/scuro/modality/modality.py
index 98dd631e12..07f80cbd9f 100644
--- a/src/main/python/systemds/scuro/modality/modality.py
+++ b/src/main/python/systemds/scuro/modality/modality.py
@@ -93,6 +93,8 @@ class Modality:
         for i, (md_k, md_v) in enumerate(md_copy.items()):
             updated_md = self.modality_type.update_metadata(md_v, self.data[i])
             self.metadata[md_k] = updated_md
+            if i == 0:
+                self.data_type = updated_md["data_layout"]["type"]
 
     def flatten(self, padding=False):
         """
diff --git a/src/main/python/systemds/scuro/modality/type.py b/src/main/python/systemds/scuro/modality/type.py
index c6f713df24..c2fe38176f 100644
--- a/src/main/python/systemds/scuro/modality/type.py
+++ b/src/main/python/systemds/scuro/modality/type.py
@@ -281,7 +281,7 @@ class ModalityType(Flag):
         md["num_channels"] = num_channels
         md["timestamp"] = create_timestamps(frequency, length)
         md["data_layout"]["representation"] = DataLayout.NESTED_LEVEL
-        md["data_layout"]["type"] = float
+        md["data_layout"]["type"] = np.float32
         md["data_layout"]["shape"] = (width, height, num_channels)
         return md
 
@@ -291,7 +291,7 @@ class ModalityType(Flag):
         md["height"] = height
         md["num_channels"] = num_channels
         md["data_layout"]["representation"] = DataLayout.SINGLE_LEVEL
-        md["data_layout"]["type"] = float
+        md["data_layout"]["type"] = np.float32
         md["data_layout"]["shape"] = (width, height, num_channels)
         return md
 
diff --git a/src/main/python/systemds/scuro/representations/concatenation.py b/src/main/python/systemds/scuro/representations/concatenation.py
index bf854a481f..ea199d5827 100644
--- a/src/main/python/systemds/scuro/representations/concatenation.py
+++ b/src/main/python/systemds/scuro/representations/concatenation.py
@@ -51,7 +51,7 @@ class Concatenation(Fusion):
         max_emb_size = self.get_max_embedding_size(modalities)
         size = len(modalities[0].data)
 
-        if modalities[0].data.ndim > 2:
+        if np.array(modalities[0].data).ndim > 2:
             data = np.zeros((size, max_emb_size, 0))
         else:
             data = np.zeros((size, 0))
diff --git a/src/main/python/systemds/scuro/representations/fusion.py b/src/main/python/systemds/scuro/representations/fusion.py
index 8cf67b1cb4..d491dcad6b 100644
--- a/src/main/python/systemds/scuro/representations/fusion.py
+++ b/src/main/python/systemds/scuro/representations/fusion.py
@@ -121,29 +121,16 @@ class Fusion(Representation):
         :param modalities: List of modalities
         :return: maximum embedding size
         """
-        try:
-            modalities[0].data = np.array(modalities[0].data)
-        except:
-            pass
-
-        if isinstance(modalities[0].data[0], list):
-            max_size = modalities[0].data[0][0].shape[1]
-        elif isinstance(modalities[0].data, np.ndarray):
-            max_size = modalities[0].data.shape[1]
-        else:
-            max_size = modalities[0].data[0].shape[1]
-        for idx in range(1, len(modalities)):
-            if isinstance(modalities[idx].data[0], list):
-                curr_shape = modalities[idx].data[0][0].shape
-            elif isinstance(modalities[idx].data, np.ndarray):
-                curr_shape = modalities[idx].data.shape
-            else:
-                curr_shape = modalities[idx].data[0].shape
-            if len(modalities[idx - 1].data) != len(modalities[idx].data):
-                raise f"Modality sizes don't match!"
-            elif len(curr_shape) == 1:
-                continue
-            elif curr_shape[1] > max_size:
-                max_size = curr_shape[1]
 
+        max_size = 0
+        for m in modalities:
+            data = m.data
+            if isinstance(data, memoryview):
+                data = np.array(data)
+            arr = np.asarray(data)
+            if arr.ndim < 2:
+                continue
+            emb_size = arr.shape[1]
+            if emb_size > max_size:
+                max_size = emb_size
         return max_size
diff --git a/src/main/python/systemds/scuro/representations/timeseries_representations.py b/src/main/python/systemds/scuro/representations/timeseries_representations.py
index d1dee67f86..631294809e 100644
--- a/src/main/python/systemds/scuro/representations/timeseries_representations.py
+++ b/src/main/python/systemds/scuro/representations/timeseries_representations.py
@@ -46,7 +46,7 @@ class TimeSeriesRepresentation(UnimodalRepresentation):
             feature = self.compute_feature(signal)
             result.append(feature)
 
-        transformed_modality.data = np.vstack(result).astype(
+        transformed_modality.data = np.vstack(np.array(result)).astype(
             modality.metadata[list(modality.metadata.keys())[0]]["data_layout"]["type"]
         )
         return transformed_modality
diff --git a/src/main/python/tests/scuro/data_generator.py b/src/main/python/tests/scuro/data_generator.py
index 3c43cabb3e..5bec163fe7 100644
--- a/src/main/python/tests/scuro/data_generator.py
+++ b/src/main/python/tests/scuro/data_generator.py
@@ -198,14 +198,14 @@ class ModalityRandomDataGenerator:
     ):
         if max_num_frames > 1:
             data = [
-                np.random.randint(
-                    0,
-                    256,
+                np.random.uniform(
+                    0.0,
+                    1.0,
                     (np.random.randint(10, max_num_frames + 1), height, width, 3),
-                    dtype=np.uint8,
                 )
                 for _ in range(num_instances)
             ]
+
             metadata = {
                 i: ModalityType.VIDEO.create_metadata(
                     30, data[i].shape[0], width, height, 3
diff --git a/src/main/python/tests/scuro/test_multimodal_fusion.py b/src/main/python/tests/scuro/test_multimodal_fusion.py
index 395a9cd862..f98824a16a 100644
--- a/src/main/python/tests/scuro/test_multimodal_fusion.py
+++ b/src/main/python/tests/scuro/test_multimodal_fusion.py
@@ -174,7 +174,9 @@ class TestMultimodalRepresentationOptimizer(unittest.TestCase):
             registry._fusion_operators = [Average, Concatenation, LSTM]
             unimodal_optimizer = UnimodalOptimizer([audio, text], [task], debug=False)
             unimodal_optimizer.optimize()
-            unimodal_optimizer.operator_performance.get_k_best_results(audio, 2, task)
+            unimodal_optimizer.operator_performance.get_k_best_results(
+                audio, 2, task, "accuracy"
+            )
             m_o = MultimodalOptimizer(
                 [audio, text],
                 unimodal_optimizer.operator_performance,
@@ -187,13 +189,13 @@ class TestMultimodalRepresentationOptimizer(unittest.TestCase):
 
             best_results = sorted(
                 fusion_results[task.model.name],
-                key=lambda x: getattr(x, "val_score").average_scores["accuracy"],
+                key=lambda x: getattr(x, "val_score")["accuracy"],
                 reverse=True,
             )[:2]
 
             assert (
-                best_results[0].val_score.average_scores["accuracy"]
-                >= best_results[1].val_score.average_scores["accuracy"]
+                best_results[0].val_score["accuracy"]
+                >= best_results[1].val_score["accuracy"]
             )
 
     def test_parallel_multimodal_fusion(self):
@@ -238,7 +240,9 @@ class TestMultimodalRepresentationOptimizer(unittest.TestCase):
             registry._fusion_operators = [Average, Concatenation, LSTM]
             unimodal_optimizer = UnimodalOptimizer([audio, text], [task], debug=False)
             unimodal_optimizer.optimize()
-            unimodal_optimizer.operator_performance.get_k_best_results(audio, 2, task)
+            unimodal_optimizer.operator_performance.get_k_best_results(
+                audio, 2, task, "accuracy"
+            )
             m_o = MultimodalOptimizer(
                 [audio, text],
                 unimodal_optimizer.operator_performance,
@@ -252,21 +256,21 @@ class TestMultimodalRepresentationOptimizer(unittest.TestCase):
 
             best_results = sorted(
                 fusion_results[task.model.name],
-                key=lambda x: getattr(x, "val_score").average_scores["accuracy"],
+                key=lambda x: getattr(x, "val_score")["accuracy"],
                 reverse=True,
             )
 
             best_results_parallel = sorted(
                 parallel_fusion_results[task.model.name],
-                key=lambda x: getattr(x, "val_score").average_scores["accuracy"],
+                key=lambda x: getattr(x, "val_score")["accuracy"],
                 reverse=True,
             )
 
             assert len(best_results) == len(best_results_parallel)
             for i in range(len(best_results)):
                 assert (
-                    best_results[i].val_score.average_scores["accuracy"]
-                    == best_results_parallel[i].val_score.average_scores["accuracy"]
+                    best_results[i].val_score["accuracy"]
+                    == best_results_parallel[i].val_score["accuracy"]
                 )
 
 
diff --git a/src/main/python/tests/scuro/test_unimodal_optimizer.py b/src/main/python/tests/scuro/test_unimodal_optimizer.py
index 252dfe997a..ca54ee64b1 100644
--- a/src/main/python/tests/scuro/test_unimodal_optimizer.py
+++ b/src/main/python/tests/scuro/test_unimodal_optimizer.py
@@ -210,7 +210,7 @@ class TestUnimodalRepresentationOptimizer(unittest.TestCase):
             registry = Registry()
 
             unimodal_optimizer = UnimodalOptimizer([modality], self.tasks, False)
-            unimodal_optimizer.optimize_parallel()
+            unimodal_optimizer.optimize()
 
             assert (
                 unimodal_optimizer.operator_performance.modality_ids[0]
@@ -218,7 +218,7 @@ class TestUnimodalRepresentationOptimizer(unittest.TestCase):
             )
             assert len(unimodal_optimizer.operator_performance.task_names) == 2
             result, cached = unimodal_optimizer.operator_performance.get_k_best_results(
-                modality, 1, self.tasks[0]
+                modality, 1, self.tasks[0], "accuracy"
             )
             assert len(result) == 1
             assert len(cached) == 1
diff --git a/src/main/python/tests/scuro/test_unimodal_representations.py b/src/main/python/tests/scuro/test_unimodal_representations.py
index 3bc28ee23c..0313cd29f8 100644
--- a/src/main/python/tests/scuro/test_unimodal_representations.py
+++ b/src/main/python/tests/scuro/test_unimodal_representations.py
@@ -169,7 +169,6 @@ class TestUnimodalRepresentations(unittest.TestCase):
     def test_video_representations(self):
         video_representations = [
             CLIPVisual(),
-            ColorHistogram(),
             I3D(),
             X3D(),
             VGG19(),
