
This PR adds new table evaluation metrics prepared by @leah1985. The metrics include:

- `table count` (check)
- `table_level_acc` - accuracy of table detection
- `element_col_level_index_acc` - accuracy of cell detection in columns
- `element_row_level_index_acc` - accuracy of cell detection in rows
- `element_col_level_content_acc` - accuracy of content detected in columns
- `element_row_level_content_acc` - accuracy of content detected in rows

TODO in next steps:

- create a minimal dataset and upload to s3 for ingest tests
- generate and add metrics on the above dataset to `test_unstructured_ingest/metrics`
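As a quick usage sketch (the script filename here is illustrative; only the `table-structure` argument and the optional output root are confirmed by the script below):

```bash
# Run the table evaluation end to end; "table-structure" selects
# measure-table-structure-accuracy-command inside the script.
./evaluation-metrics.sh table-structure

# Optionally override where outputs and metrics are written (second positional arg):
./evaluation-metrics.sh table-structure /tmp/eval-root
```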
#!/usr/bin/env bash
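# Usage (inferred from the positional arguments below; the script name is illustrative):
#   ./evaluation-metrics.sh <eval-name> [output-root]
# <eval-name>   one of: text-extraction | element-type | table-structure
# [output-root] optional; defaults to $OUTPUT_ROOT if set, else this script's directory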

set -e

SCRIPT_DIR=$(dirname "$(realpath "$0")")
cd "$SCRIPT_DIR"/.. || exit 1

EVAL_NAME="$1"

# List all structured outputs to use in this evaluation
OUTPUT_ROOT=${2:-${OUTPUT_ROOT:-$SCRIPT_DIR}}
OUTPUT_DIR=$OUTPUT_ROOT/structured-output-eval
mkdir -p "$OUTPUT_DIR"

if [ "$EVAL_NAME" == "text-extraction" ]; then
|
|
METRIC_STRATEGY="measure-text-extraction-accuracy-command"
|
|
elif [ "$EVAL_NAME" == "element-type" ]; then
|
|
METRIC_STRATEGY="measure-element-type-accuracy-command"
|
|
elif [ "$EVAL_NAME" == "table-structure" ]; then
|
|
METRIC_STRATEGY="measure-table-structure-accuracy-command"
|
|
else
|
|
echo "Wrong metric evaluation strategy given. Expected one of [ text-extraction, element-type ]. Got [ $EVAL_NAME ]."
|
|
exit 1
|
|
fi
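# $METRIC_STRATEGY is passed as the subcommand to unstructured/ingest/evaluate.py below.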

# Download gold-standard test fixtures (e.g. cct test files) from s3
BUCKET_NAME=utic-dev-tech-fixtures
FOLDER_NAME=small-eval-"$EVAL_NAME"
SOURCE_DIR=$OUTPUT_ROOT/gold-standard/$FOLDER_NAME
mkdir -p "$SOURCE_DIR"
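# Fetch anonymously; --no-sign-request works because the fixtures bucket is publicly readable.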
aws s3 cp "s3://$BUCKET_NAME/$FOLDER_NAME" "$SOURCE_DIR" --recursive --no-sign-request --region us-east-2

EXPORT_DIR=$OUTPUT_ROOT/metrics-tmp/$EVAL_NAME

# shellcheck disable=SC1091
source "$SCRIPT_DIR"/cleanup.sh
function cleanup() {
  cleanup_dir "$SOURCE_DIR"
}
trap cleanup EXIT
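# The EXIT trap runs cleanup_dir (sourced from cleanup.sh) on the downloaded
# fixtures even if the evaluation fails partway through.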

# build args
function generate_args() {
  local argtype="$1"
  local dirpath="$2"
  local list=("${@:3}")

  local -a args

  args=("--${argtype}_dir" "$dirpath")
  for filename in "${list[@]}"; do
    args+=("--${argtype}_list" "$filename")
  done
  echo "${args[@]}"
}
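
# Example (illustrative filenames):
#   generate_args "output" "$OUTPUT_DIR" a.json b.json
# prints: --output_dir <dir> --output_list a.json --output_list b.json
# With an empty list, only the --<argtype>_dir pair is emitted.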

# List selected output as a subset of OUTPUT_DIR, if any
OUTPUT_LIST=(
)
# List selected source as a subset of SOURCE_DIR, if any
SOURCE_LIST=(
)
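# To evaluate only a subset, add filenames to the lists above, e.g. (hypothetical):
#   OUTPUT_LIST=("example-doc.pdf.json")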

read -ra output_args <<<"$(generate_args "output" "$OUTPUT_DIR" "${OUTPUT_LIST[@]}")"
read -ra source_args <<<"$(generate_args "source" "$SOURCE_DIR" "${SOURCE_LIST[@]}")"

# Creating the export dir is handled in the Python script
PYTHONPATH=. ./unstructured/ingest/evaluate.py \
  $METRIC_STRATEGY "${output_args[@]}" "${source_args[@]}" \
  --export_dir "$EXPORT_DIR"
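
# Compare the freshly exported metrics against the committed expected values
# (inferred from the helper script's name; its contents are not shown here).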
"$SCRIPT_DIR"/check-diff-evaluation-metrics.sh "$EVAL_NAME"