mirror of
https://github.com/Unstructured-IO/unstructured.git
synced 2025-07-12 11:35:53 +00:00

### Description * Update all existing connector docs to use new pipeline approach ### Additional changes: * Some defaults were set for the runners to match those in the configs to make those easy to handle, i.e. the biomed runner: ```python max_retries: int = 5, max_request_time: int = 45, decay: float = 0.3, ```
103 lines
3.3 KiB
ReStructuredText
103 lines
3.3 KiB
ReStructuredText
S3
==========

Connect S3 to your preprocessing pipeline, and batch process all your documents using ``unstructured-ingest`` to store structured outputs locally on your filesystem.
|
|
|
|
First you'll need to install the S3 dependencies as shown here.
|
|
|
|
.. code:: shell

    pip install "unstructured[s3]"
|
|
|
|
Run Locally
-----------
|
|
|
|
.. tabs::

   .. tab:: Shell

      .. code:: shell

         unstructured-ingest \
           s3 \
           --remote-url s3://utic-dev-tech-fixtures/small-pdf-set/ \
           --anonymous \
           --output-dir s3-small-batch-output \
           --num-processes 2

   .. tab:: Python

      .. code:: python

         import os

         from unstructured.ingest.interfaces import PartitionConfig, ProcessorConfig, ReadConfig
         from unstructured.ingest.runner import S3Runner

         if __name__ == "__main__":
             runner = S3Runner(
                 processor_config=ProcessorConfig(
                     verbose=True,
                     output_dir="s3-small-batch-output",
                     num_processes=2,
                 ),
                 read_config=ReadConfig(),
                 partition_config=PartitionConfig(),
             )
             runner.run(
                 remote_url="s3://utic-dev-tech-fixtures/small-pdf-set/",
                 anonymous=True,
             )
|
|
|
|
Run via the API
---------------
|
|
|
|
You can also use upstream connectors with the ``unstructured`` API. For this you'll need to use the ``--partition-by-api`` flag and pass in your API key with ``--api-key``.
|
|
|
|
.. tabs::

   .. tab:: Shell

      .. code:: shell

         unstructured-ingest \
           s3 \
           --remote-url s3://utic-dev-tech-fixtures/small-pdf-set/ \
           --anonymous \
           --output-dir s3-small-batch-output \
           --num-processes 2 \
           --partition-by-api \
           --api-key "<UNSTRUCTURED-API-KEY>"

   .. tab:: Python

      .. code:: python

         import os

         from unstructured.ingest.interfaces import PartitionConfig, ProcessorConfig, ReadConfig
         from unstructured.ingest.runner import S3Runner

         if __name__ == "__main__":
             runner = S3Runner(
                 processor_config=ProcessorConfig(
                     verbose=True,
                     output_dir="s3-small-batch-output",
                     num_processes=2,
                 ),
                 read_config=ReadConfig(),
                 partition_config=PartitionConfig(
                     partition_by_api=True,
                     api_key=os.getenv("UNSTRUCTURED_API_KEY"),
                 ),
             )
             runner.run(
                 remote_url="s3://utic-dev-tech-fixtures/small-pdf-set/",
                 anonymous=True,
             )
|
|
|
|
Additionally, you will need to pass the ``--partition-endpoint`` if you're running the API locally. You can find more information about the ``unstructured`` API `here <https://github.com/Unstructured-IO/unstructured-api>`_.
|
|
|
|
For a full list of the options the CLI accepts check ``unstructured-ingest s3 --help``.
|
|
|
|
NOTE: Keep in mind that you will need to have all the appropriate extras and dependencies for the file types of the documents contained in your data storage platform if you're running this locally. You can find more information about this in the `installation guide <https://unstructured-io.github.io/unstructured/installing.html>`_.
|