haystack/test/evaluation/test_eval.py

from unittest.mock import MagicMock

from haystack.core.pipeline import Pipeline
from haystack.evaluation.eval import EvaluationResult
from haystack.evaluation.metrics import Metric


class TestEvaluationResult:
    def test_init(self):
        runnable = Pipeline()
        result = EvaluationResult(runnable=runnable, inputs=[], outputs=[], expected_outputs=[])

        assert result.runnable == runnable
        assert result.inputs == []
        assert result.outputs == []
        assert result.expected_outputs == []

    def test_calculate_metrics_with_supported_metric(self):
        runnable = Pipeline()
        result = EvaluationResult(runnable=runnable, inputs=[], outputs=[], expected_outputs=[])

        # Swap the registered recall implementation for a mock so the test only
        # verifies dispatch, not the metric computation itself.
        result._supported_metrics[Metric.RECALL] = MagicMock()
        result.calculate_metrics(metric=Metric.RECALL)

        result._supported_metrics[Metric.RECALL].assert_called_once_with()

    def test_calculate_metrics_with_non_supported_metric(self):
        runnable = Pipeline()
        result = EvaluationResult(runnable=runnable, inputs=[], outputs=[], expected_outputs=[])

        # A metric that is not in _supported_metrics is treated as a callable
        # and invoked directly, with extra keyword arguments forwarded to it.
        unsupported_metric = MagicMock()
        result.calculate_metrics(metric=unsupported_metric, some_argument="some_value")

        unsupported_metric.assert_called_once_with(some_argument="some_value")
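

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, kept as a comment so it is not collected or
# executed as a test): how the API exercised above might be driven end to end.
# The constructor signature, Metric.RECALL, and calculate_metrics(metric=...)
# are taken from the tests in this file; the concrete inputs, outputs, and
# expected_outputs values below are hypothetical placeholders.
#
#   pipeline = Pipeline()
#   eval_result = EvaluationResult(
#       runnable=pipeline,
#       inputs=[{"query": "What does EvaluationResult store?"}],        # hypothetical
#       outputs=[{"answer": "The pipeline, its inputs, and outputs."}],  # hypothetical
#       expected_outputs=[{"answer": "The pipeline, its inputs, and outputs."}],
#   )
#   metrics = eval_result.calculate_metrics(metric=Metric.RECALL)
# ---------------------------------------------------------------------------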