# pyright: reportPrivateUsage=false

import pathlib
from tempfile import SpooledTemporaryFile
from typing import Dict, List, cast

import docx
import pytest
from docx.document import Document
from pytest_mock import MockFixture

from test_unstructured.unit_utils import assert_round_trips_through_JSON
from unstructured.chunking.title import chunk_by_title
from unstructured.documents.elements import (
    Address,
    CompositeElement,
    Element,
    Footer,
    Header,
    ListItem,
    NarrativeText,
    Table,
    TableChunk,
    Text,
    Title,
)
from unstructured.partition.docx import _DocxPartitioner, partition_docx
from unstructured.partition.utils.constants import UNSTRUCTURED_INCLUDE_DEBUG_METADATA


class Describe_DocxPartitioner:
    """Unit-test suite for `unstructured.partition.docx._DocxPartitioner`."""

    def it_can_convert_a_table_to_html(self):
        table = docx.Document(example_doc_path("docx-tables.docx")).tables[0]
        assert _DocxPartitioner()._convert_table_to_html(table) == (
            "<table>\n"
            "<thead>\n"
            "<tr><th>Header Col 1 </th><th>Header Col 2 </th></tr>\n"
            "</thead>\n"
            "<tbody>\n"
            "<tr><td>Lorem ipsum </td><td>A link example</td></tr>\n"
            "</tbody>\n"
            "</table>"
        )

    def and_it_can_convert_a_nested_table_to_html(self):
        """
        Fixture table is:

        +---+-------------+---+
        | a |     >b<     | c |
        +---+-------------+---+
        |   | +-----+---+ |   |
        |   | | e   | f | |   |
        | d | +-----+---+ | i |
        |   | | g&t | h | |   |
        |   | +-----+---+ |   |
        +---+-------------+---+
        | j | k           | l |
        +---+-------------+---+
        """
        table = docx.Document(example_doc_path("docx-tables.docx")).tables[1]

        html = _DocxPartitioner()._convert_table_to_html(table)
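
        # -- NOTE: the padded cells below reproduce the partitioner's HTML output verbatim,
        # -- including the long runs of padding spaces in the flattened nested-table cells --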
        expected_lines = [
            "<table>",
            "<thead>",
            f"<tr><th>a </th><th>>b<{' ' * 96}</th><th>c </th></tr>",
            "</thead>",
            "<tbody>",
            "<tr><td>d </td><td><table>",
            "<tbody>",
            "<tr><td>e </td><td>f</td></tr>",
            "<tr><td>g&t</td><td>h</td></tr>",
            "</tbody>",
            "</table></td><td>i </td></tr>",
            f"<tr><td>j </td><td>k{' ' * 104}</td><td>l </td></tr>",
            "</tbody>",
            "</table>",
        ]
        actual_lines = html.splitlines()
        for expected, actual in zip(expected_lines, actual_lines):
            assert actual == expected

    def it_can_convert_a_table_to_plain_text(self):
        table = docx.Document(example_doc_path("docx-tables.docx")).tables[0]
        assert _DocxPartitioner()._convert_table_to_plain_text(table) == (
            "Header Col 1 Header Col 2\n" "Lorem ipsum A link example"
        )

    def and_it_can_convert_a_nested_table_to_plain_text(self):
        """
        Fixture table is:

        +---+-------------+---+
        | a |     >b<     | c |
        +---+-------------+---+
        |   | +-----+---+ |   |
        |   | | e   | f | |   |
        | d | +-----+---+ | i |
        |   | | g&t | h | |   |
        |   | +-----+---+ |   |
        +---+-------------+---+
        | j | k           | l |
        +---+-------------+---+
        """
        table = docx.Document(example_doc_path("docx-tables.docx")).tables[1]
        assert _DocxPartitioner()._convert_table_to_plain_text(table) == (
            "a >b< c\nd e f i\n g&t h\nj k l"
        )


def test_partition_docx_from_team_chat():
    """Docx with no sections partitions, recognizing both paragraphs and tables."""
    elements = cast(List[Text], partition_docx(example_doc_path("teams_chat.docx")))
    assert [e.text for e in elements] == [
        "0:0:0.0 --> 0:0:1.510\nSome Body\nOK. Yeah.",
        "0:0:3.270 --> 0:0:4.250\nJames Bond\nUmm.",
        "saved-by Dennis Forsythe",
    ]
    assert [e.category for e in elements] == ["UncategorizedText", "UncategorizedText", "Table"]


def test_partition_docx_from_filename(
    mock_document_file_path: str,
    expected_elements: List[Element],
):
    elements = partition_docx(mock_document_file_path)

    assert elements == expected_elements
    assert elements[0].metadata.page_number is None
    for element in elements:
        assert element.metadata.filename == "mock_document.docx"
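    # -- detection_origin is only recorded when debug metadata is enabled, so assert it
    # -- conditionally --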
    if UNSTRUCTURED_INCLUDE_DEBUG_METADATA:
        assert {element.metadata.detection_origin for element in elements} == {"docx"}


def test_partition_docx_from_filename_with_metadata_filename(mock_document_file_path: str):
    elements = partition_docx(mock_document_file_path, metadata_filename="test")
    assert all(element.metadata.filename == "test" for element in elements)


def test_partition_docx_with_spooled_file(
    mock_document_file_path: str, expected_elements: List[Text]
):
    """`partition_docx()` accepts a SpooledTemporaryFile as its `file` argument.

    `python-docx` will NOT accept a `SpooledTemporaryFile` in Python versions before 3.11, so we
    need to ensure the source file is appropriately converted in this case.
    """
    with open(mock_document_file_path, "rb") as test_file:
        spooled_temp_file = SpooledTemporaryFile()
        spooled_temp_file.write(test_file.read())
        spooled_temp_file.seek(0)
        elements = partition_docx(file=spooled_temp_file)
        assert elements == expected_elements
        for element in elements:
            assert element.metadata.filename is None


def test_partition_docx_from_file(mock_document_file_path: str, expected_elements: List[Text]):
    with open(mock_document_file_path, "rb") as f:
        elements = partition_docx(file=f)
    assert elements == expected_elements
    for element in elements:
        assert element.metadata.filename is None


@pytest.mark.parametrize("infer_table_structure", [True, False])
def test_partition_docx_infer_table_structure(infer_table_structure: bool):
    elements = partition_docx(
        example_doc_path("fake_table.docx"), infer_table_structure=infer_table_structure
    )
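    # -- text_as_html metadata should be populated only when infer_table_structure is True --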
    table_element_has_text_as_html_field = (
        hasattr(elements[0].metadata, "text_as_html")
        and elements[0].metadata.text_as_html is not None
    )
    assert table_element_has_text_as_html_field == infer_table_structure


def test_partition_docx_from_file_with_metadata_filename(
    mock_document_file_path: str, expected_elements: List[Text]
):
    with open(mock_document_file_path, "rb") as f:
        elements = partition_docx(file=f, metadata_filename="test")
    assert elements == expected_elements
    for element in elements:
        assert element.metadata.filename == "test"


def test_partition_docx_raises_with_both_specified(mock_document_file_path: str):
    with open(mock_document_file_path, "rb") as f:
        with pytest.raises(ValueError, match="Exactly one of filename and file must be specified"):
            partition_docx(filename=mock_document_file_path, file=f)


def test_partition_docx_raises_with_neither():
    with pytest.raises(ValueError, match="Exactly one of filename and file must be specified"):
        partition_docx()


def test_partition_docx_processes_table():
    elements = partition_docx(example_doc_path("fake_table.docx"))

    assert isinstance(elements[0], Table)
    assert elements[0].metadata.text_as_html == (
        "<table>\n"
        "<thead>\n"
        "<tr><th>Header Col 1 </th><th>Header Col 2 </th></tr>\n"
        "</thead>\n"
        "<tbody>\n"
        "<tr><td>Lorem ipsum </td><td>A Link example</td></tr>\n"
        "</tbody>\n"
        "</table>"
    )
    assert elements[0].metadata.filename == "fake_table.docx"


def test_partition_docx_grabs_header_and_footer():
    elements = partition_docx(example_doc_path("handbook-1p.docx"))

    assert elements[0] == Header("US Trustee Handbook")
    assert elements[-1] == Footer("Copyright")
    for element in elements:
        assert element.metadata.filename == "handbook-1p.docx"


def test_partition_docx_includes_pages_if_present():
    elements = cast(
        List[Text], partition_docx(example_doc_path("handbook-1p.docx"), include_page_breaks=False)
    )

    assert "PageBreak" not in [elem.category for elem in elements]
    assert elements[1].metadata.page_number == 1
    assert elements[-2].metadata.page_number == 2
    for element in elements:
        assert element.metadata.filename == "handbook-1p.docx"


def test_partition_docx_includes_page_breaks():
    elements = cast(
        List[Text], partition_docx(example_doc_path("handbook-1p.docx"), include_page_breaks=True)
    )

    assert "PageBreak" in [elem.category for elem in elements]
    assert elements[1].metadata.page_number == 1
    assert elements[-2].metadata.page_number == 2
    for element in elements:
        assert element.metadata.filename == "handbook-1p.docx"


def test_partition_docx_detects_lists():
    elements = partition_docx(example_doc_path("example-list-items-multiple.docx"))

    assert elements[-1] == ListItem(
        "This is simply dummy text of the printing and typesetting industry.",
    )
    assert sum(1 for e in elements if isinstance(e, ListItem)) == 10


def test_partition_docx_from_filename_exclude_metadata():
    elements = partition_docx(example_doc_path("handbook-1p.docx"), include_metadata=False)

    assert elements[0].metadata.filetype is None
    assert elements[0].metadata.page_name is None
    assert elements[0].metadata.filename is None


def test_partition_docx_from_file_exclude_metadata(mock_document_file_path: str):
    with open(mock_document_file_path, "rb") as f:
        elements = partition_docx(file=f, include_metadata=False)

    assert elements[0].metadata.filetype is None
    assert elements[0].metadata.page_name is None
    assert elements[0].metadata.filename is None


def test_partition_docx_metadata_date(mocker: MockFixture):
    mocker.patch(
        "unstructured.partition.docx.get_last_modified_date", return_value="2029-07-05T09:24:28"
    )

    elements = partition_docx(example_doc_path("fake.docx"))

    assert elements[0].metadata.last_modified == "2029-07-05T09:24:28"


def test_partition_docx_metadata_date_with_custom_metadata(mocker: MockFixture):
    mocker.patch(
        "unstructured.partition.docx.get_last_modified_date", return_value="2023-11-01T14:13:07"
    )

    elements = partition_docx(
        example_doc_path("fake.docx"), metadata_last_modified="2020-07-05T09:24:28"
    )

    assert elements[0].metadata.last_modified == "2020-07-05T09:24:28"


def test_partition_docx_from_file_metadata_date(mocker: MockFixture):
    mocker.patch(
        "unstructured.partition.docx.get_last_modified_date_from_file",
        return_value="2029-07-05T09:24:28",
    )

    with open(example_doc_path("fake.docx"), "rb") as f:
        elements = partition_docx(file=f)

    assert elements[0].metadata.last_modified == "2029-07-05T09:24:28"


def test_partition_docx_from_file_metadata_date_with_custom_metadata(mocker: MockFixture):
    mocker.patch(
        "unstructured.partition.docx.get_last_modified_date_from_file",
        return_value="2023-11-01T14:13:07",
    )

    with open(example_doc_path("fake.docx"), "rb") as f:
        elements = partition_docx(file=f, metadata_last_modified="2020-07-05T09:24:28")

    assert elements[0].metadata.last_modified == "2020-07-05T09:24:28"


def test_partition_docx_from_file_without_metadata_date():
    """Test partition_docx() with a file whose last-modified date cannot be determined."""
    with open(example_doc_path("fake.docx"), "rb") as f:
        sf = SpooledTemporaryFile()
        sf.write(f.read())
        sf.seek(0)
        elements = partition_docx(file=sf)

    assert elements[0].metadata.last_modified is None


def test_get_emphasized_texts_from_paragraph(expected_emphasized_texts: List[Dict[str, str]]):
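    # -- construct the partitioner directly; the positional arguments appear to be filename,
    # -- file, metadata_filename, include_page_breaks, infer_table_structure, and
    # -- metadata_last_modified (parameter order assumed from the calls in this module) --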
    partitioner = _DocxPartitioner(
        "example-docs/fake-doc-emphasized-text.docx",
        None,
        None,
        False,
        True,
        None,
    )
    paragraph = partitioner._document.paragraphs[1]
    emphasized_texts = list(partitioner._iter_paragraph_emphasis(paragraph))
    assert paragraph.text == "I am a bold italic bold-italic text."
    assert emphasized_texts == expected_emphasized_texts

    paragraph = partitioner._document.paragraphs[2]
    emphasized_texts = list(partitioner._iter_paragraph_emphasis(paragraph))
    assert paragraph.text == ""
    assert emphasized_texts == []

    paragraph = partitioner._document.paragraphs[3]
    emphasized_texts = list(partitioner._iter_paragraph_emphasis(paragraph))
    assert paragraph.text == "I am a normal text."
    assert emphasized_texts == []


def test_iter_table_emphasis(expected_emphasized_texts: List[Dict[str, str]]):
    partitioner = _DocxPartitioner(
        "example-docs/fake-doc-emphasized-text.docx",
        None,
        None,
        False,
        True,
        None,
    )
    table = partitioner._document.tables[0]
    emphasized_texts = list(partitioner._iter_table_emphasis(table))
    assert emphasized_texts == expected_emphasized_texts


def test_table_emphasis(
    expected_emphasized_text_contents: List[str],
    expected_emphasized_text_tags: List[str],
):
    partitioner = _DocxPartitioner(
        "example-docs/fake-doc-emphasized-text.docx",
        None,
        None,
        False,
        True,
        None,
    )
    table = partitioner._document.tables[0]
    emphasized_text_contents, emphasized_text_tags = partitioner._table_emphasis(table)
    assert emphasized_text_contents == expected_emphasized_text_contents
    assert emphasized_text_tags == expected_emphasized_text_tags


def test_partition_docx_grabs_emphasized_texts(
    expected_emphasized_text_contents: List[str],
    expected_emphasized_text_tags: List[str],
):
    elements = partition_docx(example_doc_path("fake-doc-emphasized-text.docx"))

    assert isinstance(elements[0], Table)
    assert elements[0].metadata.emphasized_text_contents == expected_emphasized_text_contents
    assert elements[0].metadata.emphasized_text_tags == expected_emphasized_text_tags

    assert elements[1] == NarrativeText("I am a bold italic bold-italic text.")
    assert elements[1].metadata.emphasized_text_contents == expected_emphasized_text_contents
    assert elements[1].metadata.emphasized_text_tags == expected_emphasized_text_tags

    assert elements[2] == NarrativeText("I am a normal text.")
    assert elements[2].metadata.emphasized_text_contents is None
    assert elements[2].metadata.emphasized_text_tags is None


def test_partition_docx_with_json(mock_document_file_path: str):
    elements = partition_docx(mock_document_file_path)
    assert_round_trips_through_JSON(elements)


def test_parse_category_depth_by_style():
    partitioner = _DocxPartitioner(
        "example-docs/category-level.docx",
        None,
        None,
        False,
        True,
        None,
    )

    # Category depths are 0-indexed and relative to the category type
    # Title, list item, bullet, narrative text, etc.
    test_cases = [
        (0, "Call me Ishmael."),
        (0, "A Heading 1"),
        (0, "Whenever I find myself growing grim"),
        (0, "A top level list item"),
        (1, "Next level"),
        (1, "Same"),
        (0, "Second top-level list item"),
        (0, "whenever I find myself involuntarily"),
        (0, ""),  # Empty paragraph
        (1, "A Heading 2"),
        (0, "This is my substitute for pistol and ball"),
        (0, "Another Heading 1"),
        (0, "There now is your insular city"),
    ]

    paragraphs = partitioner._document.paragraphs
    for idx, (depth, text) in enumerate(test_cases):
        paragraph = paragraphs[idx]
        actual_depth = partitioner._parse_category_depth_by_style(paragraph)
        assert text in paragraph.text, f"paragraph[{idx}].text does not contain {text}"
        assert (
            actual_depth == depth
        ), f"expected paragraph[{idx}] to have depth=={depth}, got {actual_depth}"


def test_parse_category_depth_by_style_name():
    partitioner = _DocxPartitioner(None, None, None, False, True, None)
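
    # -- (expected depth, built-in style name) pairs; the parsed depth appears to track the
    # -- numeric suffix of the style name (zero-based) --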
    test_cases = [
        (0, "Heading 1"),
        (1, "Heading 2"),
        (2, "Heading 3"),
        (1, "Subtitle"),
        (0, "List"),
        (1, "List 2"),
        (2, "List 3"),
        (0, "List Bullet"),
        (1, "List Bullet 2"),
        (2, "List Bullet 3"),
        (0, "List Number"),
        (1, "List Number 2"),
        (2, "List Number 3"),
    ]

    for idx, (depth, text) in enumerate(test_cases):
        assert (
            partitioner._parse_category_depth_by_style_name(text) == depth
        ), f"test case {test_cases[idx]} failed"


def test_parse_category_depth_by_style_ilvl():
    partitioner = _DocxPartitioner(None, None, None, False, True, None)
    assert partitioner._parse_category_depth_by_style_ilvl() == 0


def test_add_chunking_strategy_on_partition_docx_default_args():
    chunk_elements = partition_docx(
        example_doc_path("handbook-1p.docx"), chunking_strategy="by_title"
    )
    elements = partition_docx(example_doc_path("handbook-1p.docx"))
    chunks = chunk_by_title(elements)
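
    # -- chunking during partitioning should match chunking the partitioned elements afterwards --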
    assert chunk_elements != elements
    assert chunk_elements == chunks


def test_add_chunking_strategy_on_partition_docx():
    docx_path = example_doc_path("fake-doc-emphasized-text.docx")

    chunk_elements = partition_docx(
        docx_path, chunking_strategy="by_title", max_characters=9, combine_text_under_n_chars=5
    )
    elements = partition_docx(docx_path)
    chunks = chunk_by_title(elements, max_characters=9, combine_text_under_n_chars=5)

    assert chunk_elements == chunks
    assert elements != chunk_elements
    for chunk in chunks:
        assert isinstance(chunk, (CompositeElement, TableChunk))
        assert len(chunk.text) <= 9


def test_partition_docx_element_metadata_has_languages():
    filename = "example-docs/handbook-1p.docx"
    elements = partition_docx(filename=filename)
    assert elements[0].metadata.languages == ["eng"]


def test_partition_docx_respects_detect_language_per_element():
    filename = "example-docs/language-docs/eng_spa_mult.docx"
    elements = partition_docx(filename=filename, detect_language_per_element=True)
    langs = [element.metadata.languages for element in elements]
    assert langs == [["eng"], ["spa", "eng"], ["eng"], ["eng"], ["spa"]]


def test_partition_docx_respects_languages_arg():
    filename = "example-docs/handbook-1p.docx"
    elements = partition_docx(filename=filename, languages=["deu"])
    assert elements[0].metadata.languages == ["deu"]


def test_partition_docx_raises_TypeError_for_invalid_languages():
    with pytest.raises(TypeError):
        filename = "example-docs/handbook-1p.docx"
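        # -- languages must be a list of language codes; passing a bare string should raise --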
        partition_docx(
            filename=filename,
            languages="eng",  # pyright: ignore[reportGeneralTypeIssues]
        )


def test_partition_docx_includes_hyperlink_metadata():
    elements = cast(List[Text], partition_docx(example_doc_path("hlink-meta.docx")))

    # -- regular paragraph, no hyperlinks --
    element = elements[0]
    assert element.text == "One"
    metadata = element.metadata
    assert metadata.links is None
    assert metadata.link_texts is None
    assert metadata.link_urls is None

    # -- paragraph with "internal-jump" hyperlinks, no URL --
    element = elements[1]
    assert element.text == "Two with link to bookmark."
    metadata = element.metadata
    assert metadata.links is None
    assert metadata.link_texts is None
    assert metadata.link_urls is None

    # -- paragraph with external link, no fragment --
    element = elements[2]
    assert element.text == "Three with link to foo.com."
    metadata = element.metadata
    assert metadata.links == [
        {
            "start_index": 11,
            "text": "link to foo.com",
            "url": "https://foo.com",
        },
    ]
    assert metadata.link_texts == ["link to foo.com"]
    assert metadata.link_urls == ["https://foo.com"]

    # -- paragraph with external link that has query string --
    element = elements[3]
    assert element.text == "Four with link to foo.com searching for bar."
    metadata = element.metadata
    assert metadata.links == [
        {
            "start_index": 10,
            "text": "link to foo.com searching for bar",
            "url": "https://foo.com?q=bar",
        },
    ]
    assert metadata.link_texts == ["link to foo.com searching for bar"]
    assert metadata.link_urls == ["https://foo.com?q=bar"]

    # -- paragraph with external link with separate URI fragment --
    element = elements[4]
    assert element.text == "Five with link to foo.com introduction section."
    metadata = element.metadata
    assert metadata.links == [
        {
            "start_index": 10,
            "text": "link to foo.com introduction section",
            "url": "http://foo.com/#intro",
        },
    ]
    assert metadata.link_texts == ["link to foo.com introduction section"]
    assert metadata.link_urls == ["http://foo.com/#intro"]

    # -- paragraph with link to file on local filesystem --
    element = elements[7]
    assert element.text == "Eight with link to file."
    metadata = element.metadata
    assert metadata.links == [
        {
            "start_index": 11,
            "text": "link to file",
            "url": "court-exif.jpg",
        },
    ]
    assert metadata.link_texts == ["link to file"]
    assert metadata.link_urls == ["court-exif.jpg"]

    # -- regular paragraph, no hyperlinks, ensure no state is retained --
    element = elements[8]
    assert element.text == "Nine."
    metadata = element.metadata
    assert metadata.links is None
    assert metadata.link_texts is None
    assert metadata.link_urls is None


# -- module-level fixtures -----------------------------------------------------------------------


def example_doc_path(filename: str) -> str:
    """String path to a file in the example-docs/ directory."""
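    # -- resolve example-docs/ relative to the repository root, reached by walking up from this
    # -- test module --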
    return str(pathlib.Path(__file__).parent.parent.parent.parent / "example-docs" / filename)


@pytest.fixture()
def expected_elements() -> List[Text]:
    return [
        Title("These are a few of my favorite things:"),
        ListItem("Parrots"),
        ListItem("Hockey"),
        Title("Analysis"),
        NarrativeText("This is my first thought. This is my second thought."),
        NarrativeText("This is my third thought."),
        Text("2023"),
        Address("DOYLESTOWN, PA 18901"),
    ]


@pytest.fixture()
def expected_emphasized_text_contents() -> List[str]:
    return ["bold", "italic", "bold-italic", "bold-italic"]


@pytest.fixture()
def expected_emphasized_text_tags() -> List[str]:
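    # -- each bold-italic run contributes both a "b" and an "i" entry --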
return ["b", "i", "b", "i"]
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture()
|
|
|
|
def expected_emphasized_texts():
|
|
|
|
return [
|
|
|
|
{"text": "bold", "tag": "b"},
|
|
|
|
{"text": "italic", "tag": "i"},
|
|
|
|
{"text": "bold-italic", "tag": "b"},
|
|
|
|
{"text": "bold-italic", "tag": "i"},
|
|
|
|
]


@pytest.fixture()
def mock_document():
    document = docx.Document()

    document.add_paragraph("These are a few of my favorite things:", style="Heading 1")
    # NOTE(robinson) - this should get picked up as a list item due to the •
    document.add_paragraph("• Parrots", style="Normal")
    # NOTE(robinson) - this should get dropped because it's empty
    document.add_paragraph("• ", style="Normal")
    document.add_paragraph("Hockey", style="List Bullet")
    # NOTE(robinson) - this should get dropped because it's empty
    document.add_paragraph("", style="List Bullet")
    # NOTE(robinson) - this should get picked up as a title
    document.add_paragraph("Analysis", style="Normal")
    # NOTE(robinson) - this should get dropped because it is empty
    document.add_paragraph("", style="Normal")
    # NOTE(robinson) - this should get picked up as a narrative text
    document.add_paragraph("This is my first thought. This is my second thought.", style="Normal")
    document.add_paragraph("This is my third thought.", style="Body Text")
    # NOTE(robinson) - this should just be regular text
    document.add_paragraph("2023")
    # NOTE(robinson) - this should be an address
    document.add_paragraph("DOYLESTOWN, PA 18901")

    return document


@pytest.fixture()
def mock_document_file_path(mock_document: Document, tmp_path: pathlib.Path) -> str:
    filename = str(tmp_path / "mock_document.docx")
    mock_document.save(filename)
    return filename