#!/usr/bin/env bash
# Ingest test: Sharepoint source -> chunking/embedding -> Azure Cognitive Search destination.
set -e

# Resolve the test directory and cd to the repo root so relative paths below work.
SRC_PATH=$(dirname "$(realpath "$0")")
SCRIPT_DIR=$(dirname "$SRC_PATH")
cd "$SCRIPT_DIR"/.. || exit 1
OUTPUT_FOLDER_NAME=sharepoint-azure-dest
# OUTPUT_ROOT may be overridden by the caller (e.g. CI) to relocate artifacts.
OUTPUT_ROOT=${OUTPUT_ROOT:-$SCRIPT_DIR}
OUTPUT_DIR=$OUTPUT_ROOT/structured-output/$OUTPUT_FOLDER_NAME
WORK_DIR=$OUTPUT_ROOT/workdir/$OUTPUT_FOLDER_NAME
DOWNLOAD_DIR=$SCRIPT_DIR/download/$OUTPUT_FOLDER_NAME
# Timestamp suffix keeps the remote index name unique per run so repeated/parallel runs don't collide.
DESTINATION_INDEX="utic-test-ingest-fixtures-output-$(date +%s)"
# The vector configs on the schema currently only exist on versions:
# 2023-07-01-Preview, 2021-04-30-Preview, 2020-06-30-Preview
API_VERSION=2023-07-01-Preview
CI=${CI:-"false"}
# Skip (exit 0, not fail) when required secrets are unavailable, e.g. on external PRs.
if [ -z "$SHAREPOINT_CLIENT_ID" ] || [ -z "$SHAREPOINT_CRED" ]; then
  echo "Skipping Sharepoint ingest test because the SHAREPOINT_CLIENT_ID or SHAREPOINT_CRED env var is not set."
  exit 0
fi

if [ -z "$SHAREPOINT_PERMISSIONS_APP_ID" ] || [ -z "$SHAREPOINT_PERMISSIONS_APP_CRED" ] || [ -z "$SHAREPOINT_PERMISSIONS_TENANT" ]; then
  echo "Skipping Sharepoint ingest test because the SHAREPOINT_PERMISSIONS_APP_ID, SHAREPOINT_PERMISSIONS_APP_CRED, or SHAREPOINT_PERMISSIONS_TENANT env var is not set."
  exit 0
fi

if [ -z "$OPENAI_API_KEY" ]; then
  echo "Skipping Sharepoint embedding ingest test because the OPENAI_API_KEY env var is not set."
  exit 0
fi

if [ -z "$AZURE_SEARCH_ENDPOINT" ] && [ -z "$AZURE_SEARCH_API_KEY" ]; then
  echo "Skipping Sharepoint Azure Cognitive Search ingest test because neither AZURE_SEARCH_ENDPOINT nor AZURE_SEARCH_API_KEY env vars are set."
  exit 0
fi
# Provides cleanup_dir used by the cleanup trap below.
# shellcheck disable=SC1091
source "$SCRIPT_DIR"/cleanup.sh

# On exit: delete the remote index if it was created, then remove local artifacts.
function cleanup {
  # Probe the index first so the DELETE is only attempted when it exists.
  response_code=$(curl -s -o /dev/null -w "%{http_code}" \
    "https://utic-test-ingest-fixtures.search.windows.net/indexes/$DESTINATION_INDEX?api-version=$API_VERSION" \
    --header "api-key: $AZURE_SEARCH_API_KEY" \
    --header 'content-type: application/json')
  if [ "$response_code" == "200" ]; then
    echo "deleting index $DESTINATION_INDEX"
    curl -X DELETE \
      "https://utic-test-ingest-fixtures.search.windows.net/indexes/$DESTINATION_INDEX?api-version=$API_VERSION" \
      --header "api-key: $AZURE_SEARCH_API_KEY" \
      --header 'content-type: application/json'
  else
    echo "Index $DESTINATION_INDEX does not exist, nothing to delete"
  fi

  cleanup_dir "$OUTPUT_DIR"
  cleanup_dir "$WORK_DIR"
  # Downloads are only purged in CI; locally they are kept to speed up re-runs.
  if [ "$CI" == "true" ]; then
    cleanup_dir "$DOWNLOAD_DIR"
  fi
}
trap cleanup EXIT
# Create the destination index from the fixture schema before running the ingest.
echo "Creating index $DESTINATION_INDEX"
response_code=$(curl -s -o /dev/null -w "%{http_code}" -X PUT \
  "https://utic-test-ingest-fixtures.search.windows.net/indexes/$DESTINATION_INDEX?api-version=$API_VERSION" \
  --header "api-key: $AZURE_SEARCH_API_KEY" \
  --header 'content-type: application/json' \
  --data "@$SCRIPT_DIR/files/azure_cognitive_index_schema.json")
# Any non-4xx/5xx status counts as success (PUT returns 201 on create).
if [ "$response_code" -lt 400 ]; then
  echo "Index creation success: $response_code"
else
  echo "Index creation failure: $response_code"
  exit 1
fi
# Run the ingest pipeline: Sharepoint source -> chunk -> embed -> Azure Cognitive Search destination.
# RUN_SCRIPT may be overridden (e.g. to test an installed entry point instead of the repo script).
RUN_SCRIPT=${RUN_SCRIPT:-./unstructured/ingest/main.py}
PYTHONPATH=${PYTHONPATH:-.} "$RUN_SCRIPT" \
  sharepoint \
  --download-dir "$DOWNLOAD_DIR" \
  --metadata-exclude file_directory,metadata.data_source.date_processed,metadata.last_modified,metadata.detection_class_prob,metadata.parent_id,metadata.category_depth \
  --num-processes 2 \
  --strategy hi_res \
  --preserve-downloads \
  --reprocess \
  --output-dir "$OUTPUT_DIR" \
  --verbose \
  --client-cred "$SHAREPOINT_CRED" \
  --client-id "$SHAREPOINT_CLIENT_ID" \
  --site "$SHAREPOINT_SITE" \
  --permissions-application-id "$SHAREPOINT_PERMISSIONS_APP_ID" \
  --permissions-client-cred "$SHAREPOINT_PERMISSIONS_APP_CRED" \
  --permissions-tenant "$SHAREPOINT_PERMISSIONS_TENANT" \
  --path "Shared Documents" \
  --recursive \
  --embedding-api-key "$OPENAI_API_KEY" \
  --chunk-elements \
  --chunk-multipage-sections \
  --work-dir "$WORK_DIR" \
  azure-cognitive-search \
  --key "$AZURE_SEARCH_API_KEY" \
  --endpoint "$AZURE_SEARCH_ENDPOINT" \
  --index "$DESTINATION_INDEX"
# It can take some time for the index to catch up with the content that was written; this checks between
# 10s sleeps to give it time to process the writes. Times out after checking for about a minute.
docs_count_remote=0
attempt=1
while [ "$docs_count_remote" -eq 0 ] && [ "$attempt" -lt 6 ]; do
  echo "attempt $attempt: sleeping 10 seconds to let index finish catching up after writes"
  sleep 10
  # Check the contents of the index; \$count is a literal Azure Search endpoint, not a shell variable.
  docs_count_remote=$(curl "https://utic-test-ingest-fixtures.search.windows.net/indexes/$DESTINATION_INDEX/docs/\$count?api-version=$API_VERSION" \
    --header "api-key: $AZURE_SEARCH_API_KEY" \
    --header 'content-type: application/json' | jq)
  echo "docs count pulled from Azure: $docs_count_remote"
  attempt=$((attempt + 1))
done

# Sum element counts across all local output json files and compare against the remote total.
# NOTE(review): the ** pattern only recurses when bash's globstar is enabled — confirm outputs
# aren't nested more than one level deep, or add `shopt -s globstar`.
docs_count_local=0
for i in $(jq length "$OUTPUT_DIR"/**/*.json); do
  docs_count_local=$((docs_count_local + i))
done

if [ "$docs_count_remote" -ne "$docs_count_local" ]; then
  echo "Number of docs $docs_count_remote doesn't match the expected docs: $docs_count_local"
  exit 1
fi