loaders:
  - type: haystack_pydoc_tools.loaders.CustomPythonLoader
    search_path: [../../../haystack/components/evaluators]
    modules: ["sas_evaluator", "statistical_evaluator"]
    ignore_when_discovered: ["__init__"]
processors:
  - type: filter
    expression:
    documented_only: true
    do_not_filter_modules: false
    skip_empty_modules: true
  - type: smart
  - type: crossref
renderer:
  type: haystack_pydoc_tools.renderers.ReadmePreviewRenderer
  excerpt: Enables evaluation of LLMs generated answers.
  category_slug: haystack-api
  title: Evaluation
  slug: eval-api
  order: 63
  markdown:
    descriptive_class_title: false
    descriptive_module_title: true
    add_method_class_prefix: true
    add_member_class_prefix: false
  filename: eval_api.md