From 6c122b10ba6b0759d19f650d46e8f7307da7d914 Mon Sep 17 00:00:00 2001
From: Michele Dolfi <dol@zurich.ibm.com>
Date: Wed, 13 Nov 2024 16:41:30 +0100
Subject: [PATCH 01/12] update pdf2parquet README

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
---
 transforms/language/pdf2parquet/README.md     |   8 +-
 .../language/pdf2parquet/python/README.md     | 117 ++++++++++++++----
 transforms/language/pdf2parquet/ray/README.md |  50 +++++++-
 3 files changed, 148 insertions(+), 27 deletions(-)

diff --git a/transforms/language/pdf2parquet/README.md b/transforms/language/pdf2parquet/README.md
index 14373a68c..89a53147d 100644
--- a/transforms/language/pdf2parquet/README.md
+++ b/transforms/language/pdf2parquet/README.md
@@ -1,10 +1,10 @@
-# PDF2PARQUET Transform
+# Pdf2Parquet Transform

-The PDF2PARQUET transforms iterate through PDF files or zip of PDF files and generates parquet files
-containing the converted document in Markdown format.
+The Pdf2Parquet transforms iterate through PDF, DOCX, PPTX, and image files, or zip archives of such files, and generate parquet files
+containing the converted documents in Markdown or JSON format.

-The PDF conversion is using the [Docling package](https://github.com/DS4SD/docling).
+The conversion uses the [Docling package](https://github.com/DS4SD/docling).

The following runtimes are available:

diff --git a/transforms/language/pdf2parquet/python/README.md b/transforms/language/pdf2parquet/python/README.md
index a4bd31e06..aaf56669f 100644
--- a/transforms/language/pdf2parquet/python/README.md
+++ b/transforms/language/pdf2parquet/python/README.md
@@ -1,4 +1,15 @@
-# Ingest PDF to Parquet
+# Ingest PDF to Parquet Transform
+
+Please see the set of
+[transform project conventions](../../../README.md#transform-project-conventions)
+for details on general project conventions, transform configuration,
+testing and IDE set up.
+
+## Contributors
+
+- Michele Dolfi (dol@zurich.ibm.com)
+
+## Description

This transform iterates through document files or zip archives of files and generates parquet files
containing the converted documents in Markdown or JSON format.
@@ -7,6 +18,9 @@ The PDF conversion is using the [Docling package](https://github.com/DS4SD/docling).

The Docling configuration in DPK is tuned for best results when running large batch ingestions.
For more details on the multiple configuration options, please refer to the official [Docling documentation](https://ds4sd.github.io/docling/).

+
+### Input files
+
This transform supports the following input formats:

- PDF documents
- DOCX documents
- PPTX documents
- Image files (png, jpg)
- HTML pages
- Markdown documents
- ASCII Docs documents

+The input documents can be provided in a folder structure or as a zip archive.
+Please see the configuration section for specifying the input files.
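As a sketch of what this looks like in practice, the input location and file selection can be passed through the runtime parameters (the folder paths and extension list below are illustrative; `data_files_to_use` is the option introduced later in this series):

```py
import ast
import os

from data_processing.utils import ParamsUtils

# Local input/output folder pair (illustrative paths).
local_conf = {
    "input_folder": os.path.join("test-data", "input"),
    "output_folder": "output",
}

params = {
    # Data access configuration.
    "data_local_config": ParamsUtils.convert_to_ast(local_conf),
    # Only pick up files with these extensions from the input folder.
    "data_files_to_use": ast.literal_eval("['.pdf','.docx','.pptx','.zip']"),
}
```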
-## Output format

-The output format will contain all the columns of the metadata CSV file,
-with the addition of the following columns

-```jsonc
-{
-  "source_filename": "string", // the basename of the source archive or file
-  "filename": "string", // the basename of the PDF file
-  "contents": "string", // the content of the PDF
-  "document_id": "string", // the document id, a random uuid4
-  "document_hash": "string", // the document hash of the input content
-  "ext": "string", // the detected file extension
-  "hash": "string", // the hash of the `contents` column
-  "size": "string", // the size of `contents`
-  "date_acquired": "date", // the date when the transform was executing
-  "num_pages": "number", // number of pages in the PDF
-  "num_tables": "number", // number of tables in the PDF
-  "num_doc_elements": "number", // number of document elements in the PDF
-  "pdf_convert_time": "float", // time taken to convert the document in seconds
-}
-```
+### Output format

+The output table will contain the following columns:

+| output column name | data type | description |
+|-|-|-|
+| source_filename | string | the basename of the source archive or file |
+| filename | string | the basename of the PDF file |
+| contents | string | the content of the PDF |
+| document_id | string | the document id, a random uuid4 |
+| document_hash | string | the document hash of the input content |
+| ext | string | the detected file extension |
+| hash | string | the hash of the `contents` column |
+| size | string | the size of `contents` |
+| date_acquired | date | the date when the transform was executing |
+| num_pages | number | number of pages in the PDF |
+| num_tables | number | number of tables in the PDF |
+| num_doc_elements | number | number of document elements in the PDF |
+| pdf_convert_time | float | time taken to convert the document in seconds |

-## Parameters
+## Configuration

The transform can be initialized with the following parameters.

| Parameter | Default | Description |
|------------|----------|--------------|
| `pdf_backend` | `dlparse_v2` | The PDF backend to use. Valid values are `dlparse_v2`, `dlparse_v1`, `pypdfium2`. |
| `double_precision` | `8` | If set, all floating points (e.g. bounding boxes) are rounded to this precision. For tests it is advised to use 0. |

+Example:

+```py
+{
+    "contents_type": "application/json",
+    "do_ocr": True,
+}
+```

+## Usage

+### Launched Command Line Options

When invoking the CLI, the parameters must be set as `--pdf2parquet_<name>`, e.g. `--pdf2parquet_do_ocr=true`.

+### Running the samples
+To run the samples, use the following `make` targets:

+* `run-cli-sample` - runs src/pdf2parquet_transform_python.py using command line args
+* `run-local-sample` - runs src/pdf2parquet_local.py
+* `run-local-python-sample` - runs src/pdf2parquet_local_python.py

+These targets will activate the virtual environment and set up any configuration needed.
+Use the `-n` option of `make` to see the detail of what is done to run the sample.

+For example, run
+```shell
+make run-local-python-sample
+...
+```
+and then
+```shell
+ls output
+```
+to see the results of the transform.

+### Code example

+TBD (link to the notebook will be provided)

+See the sample script [src/pdf2parquet_local_python.py](src/pdf2parquet_local_python.py).
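Until the notebook link is available, the following minimal sketch shows a pure-Python invocation, mirroring the sample script and the notebook added later in this series (folder paths and the chosen parameter values are illustrative):

```py
import os
import sys

from data_processing.runtime.pure_python import PythonTransformLauncher
from data_processing.utils import ParamsUtils
from pdf2parquet_transform_python import Pdf2ParquetPythonTransformConfiguration

# Local input/output folder pair (illustrative paths).
local_conf = {
    "input_folder": os.path.join("test-data", "input"),
    "output_folder": "output",
}

params = {
    "data_local_config": ParamsUtils.convert_to_ast(local_conf),
    # Transform parameters carry the same pdf2parquet_ prefix as the CLI flags.
    "pdf2parquet_contents_type": "application/json",
    "pdf2parquet_do_ocr": True,
}

# The launcher parses its configuration from the command line.
sys.argv = ParamsUtils.dict_to_req(d=params)
launcher = PythonTransformLauncher(runtime_config=Pdf2ParquetPythonTransformConfiguration())
launcher.launch()
```

After the run, the converted parquet tables and a `metadata.json` summary appear in the output folder.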
+
+
+### Transforming data using the transform image
+
+To use the transform image to transform your data, please refer to the
+[running images quickstart](../../../../doc/quick-start/run-transform-image.md),
+substituting the name of this transform image and runtime as appropriate.
+
+## Testing
+
+Testing follows [the testing strategy of data-processing-lib](../../../../data-processing-lib/doc/transform-testing.md).
+
+Currently we have:
+- [Unit test](test/test_pdf2parquet_python.py)
+- [Integration test](test/test_pdf2parquet.py)
+
+
 ## Credits

 The PDF document conversion is developed by the AI for Knowledge group in IBM Research Zurich.

diff --git a/transforms/language/pdf2parquet/ray/README.md b/transforms/language/pdf2parquet/ray/README.md
index 5ef98f645..4db4b47c7 100644
--- a/transforms/language/pdf2parquet/ray/README.md
+++ b/transforms/language/pdf2parquet/ray/README.md
@@ -1,7 +1,55 @@
-# PDF2PARQUET Ray Transform
+# Ingest PDF to Parquet Ray Transform
+
+Please see the set of
+[transform project conventions](../../../README.md#transform-project-conventions)
+for details on general project conventions, transform configuration,
+testing and IDE set up.

 This module implements the Ray version of the [pdf2parquet transform](../python/).

+## Summary
+This project wraps the [Ingest PDF to Parquet transform](../python) with a Ray runtime.
+
+## Configuration and Command Line Options
+
+Ingest PDF to Parquet configuration and command line options are the same as for the base python transform.
+
+## Running
+
+### Launched Command Line Options
+When running the transform with the Ray launcher (i.e. TransformLauncher),
+in addition to the options available to the transform as defined [here](../python/README.md),
+the set of
+[Ray launcher options](../../../../data-processing-lib/doc/ray-launcher-options.md) is available.
+
+### Running the samples
+To run the samples, use the following `make` targets:
+
+* `run-cli-sample` - runs src/pdf2parquet_transform_ray.py using command line args
+* `run-local-sample` - runs src/pdf2parquet_local_ray.py
+* `run-s3-sample` - runs src/pdf2parquet_s3_ray.py
+    * Requires prior invocation of `make minio-start` to load data into local minio for S3 access.
+
+These targets will activate the virtual environment and set up any configuration needed.
+Use the `-n` option of `make` to see the detail of what is done to run the sample.
+
+For example, run
+```shell
+make run-cli-sample
+...
+```
+and then
+```shell
+ls output
+```
+to see the results of the transform.
+
+
+### Transforming data using the transform image
+
+To use the transform image to transform your data, please refer to the
+[running images quickstart](../../../../doc/quick-start/run-transform-image.md),
+substituting the name of this transform image and runtime as appropriate.
+
 ## Prometheus metrics

From 67694522dd7900d573b0b18c735424807070cf14 Mon Sep 17 00:00:00 2001
From: Michele Dolfi <dol@zurich.ibm.com>
Date: Wed, 13 Nov 2024 16:53:39 +0100
Subject: [PATCH 02/12] add data_files_to_use

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
---
 transforms/language/pdf2parquet/python/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/transforms/language/pdf2parquet/python/README.md b/transforms/language/pdf2parquet/python/README.md
index aaf56669f..d9dc2520a 100644
--- a/transforms/language/pdf2parquet/python/README.md
+++ b/transforms/language/pdf2parquet/python/README.md
@@ -63,6 +63,7 @@ The transform can be initialized with the following parameters.
| Parameter | Default | Description |
|------------|----------|--------------|
+| `data_files_to_use` | - | The file extensions to be considered when running the transform. Example value `['.pdf','.docx','.pptx','.zip']`. For all the supported input formats, see the section above. |
| `batch_size` | -1 | Number of documents to be saved in the same result table. A value of -1 will generate one result file for each input file. |
| `artifacts_path` | | Path where the Docling model artifacts are located; if unset, they will be downloaded and fetched from the [HF_HUB_CACHE](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache) folder. |
| `contents_type` | `text/markdown` | The output type for the `contents` column. Valid types are `text/markdown`, `text/plain` and `application/json`. |
@@ -78,6 +79,7 @@ Example

 ```py
 {
+    "data_files_to_use": ast.literal_eval("['.pdf','.docx','.pptx','.zip']"),
     "contents_type": "application/json",
     "do_ocr": True,
 }

From 7b592bcb8808c22c4e8256ef9de0a6a66af99f12 Mon Sep 17 00:00:00 2001
From: Michele Dolfi <dol@zurich.ibm.com>
Date: Wed, 13 Nov 2024 18:03:19 +0100
Subject: [PATCH 03/12] doc_chunk README

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
---
 .../language/doc_chunk/python/README.md       | 57 +++++++++++++++++--
 1 file changed, 52 insertions(+), 5 deletions(-)

diff --git a/transforms/language/doc_chunk/python/README.md b/transforms/language/doc_chunk/python/README.md
index 9abca2b79..1ec3a8080 100644
--- a/transforms/language/doc_chunk/python/README.md
+++ b/transforms/language/doc_chunk/python/README.md
@@ -1,5 +1,16 @@
 # Chunk documents Transform

+Please see the set of
+[transform project conventions](../../../README.md#transform-project-conventions)
+for details on general project conventions, transform configuration,
+testing and IDE set up.
+
+## Contributors
+
+- Michele Dolfi (dol@zurich.ibm.com)
+
+## Description
+
 This transform chunks documents. It supports multiple _chunker modules_ (see the `chunking_type` parameter).

 When using documents converted to JSON, the transform leverages the [Docling Core](https://github.com/DS4SD/docling-core) `HierarchicalChunker`,
 which provides the required JSON structure.
 When using documents converted to Markdown, the transform leverages the [Llama Index](https://docs.llamaindex.ai/en/stable/module_guides/loading/node_parsers/modules/#markdownnodeparser) `MarkdownNodeParser`, which relies on its internal Markdown splitting logic.

-## Output format
+
+### Input
+
+| input column name | data type | description |
+|-|-|-|
+| the one specified in _content_column_name_ configuration | string | the content used in this transform |
+
+
+### Output format

 The output parquet file will contain all the original columns, but the content will be replaced with the individual chunks.

-### Tracing the origin of the chunks
+#### Tracing the origin of the chunks

 The transform allows tracing the origin of each chunk via the `source_doc_id` column, which is set to the value of the `document_id` column (if present) in the input table.
 The actual names of the columns can be customized with the parameters described below.

-## Running
-
-### Parameters
+## Configuration

 The transform can be tuned with the following parameters.

 | `output_pageno_column_name` | `page_number` | Column name to store the page number of the chunk in the output table. |
 | `output_bbox_column_name` | `bbox` | Column name to store the bbox of the chunk in the output table. |
+
+
+## Usage
+
+### Launched Command Line Options
+
 When invoking the CLI, the parameters must be set as `--doc_chunk_<name>`, e.g. `--doc_chunk_column_name_key=myoutput`.

+### Running the samples
+To run the samples, use the following `make` targets:
+
+* `run-cli-sample` - runs src/doc_chunk_transform_python.py using command line args
+* `run-local-sample` - runs src/doc_chunk_local.py
+* `run-local-python-sample` - runs src/doc_chunk_local_python.py
+
+These targets will activate the virtual environment and set up any configuration needed.
+Use the `-n` option of `make` to see the detail of what is done to run the sample.
+
+For example, run
+```shell
+make run-local-python-sample
+...
+```
+and then
+```shell
+ls output
+```
+to see the results of the transform.
+
+### Code example
+
+TBD (link to the notebook will be provided)
+
+See the sample script [src/doc_chunk_local_python.py](src/doc_chunk_local_python.py).
+
+
 ### Transforming data using the transform image

 To use the transform image to transform your data, please refer to the
 [running images quickstart](../../../../doc/quick-start/run-transform-image.md),
 substituting the name of this transform image and runtime as appropriate.
+
+## Testing
+
+Testing follows [the testing strategy of data-processing-lib](../../../../data-processing-lib/doc/transform-testing.md).
+
+Currently we have:
+- [Unit test](test/test_doc_chunk_python.py)
+
+
+## Further Resources
+
+- For the [Docling Core](https://github.com/DS4SD/docling-core) `HierarchicalChunker`
+- For the Markdown chunker in LlamaIndex
+  - [Markdown chunking](https://docs.llamaindex.ai/en/stable/module_guides/loading/node_parsers/modules/#markdownnodeparser)
+- For the Token Text Splitter in LlamaIndex
+  - [Token Text Splitter](https://docs.llamaindex.ai/en/stable/api_reference/node_parsers/token_text_splitter/)

From 61d72fd30f83c4edf58e136d95c74372dd1fc9e9 Mon Sep 17 00:00:00 2001
From: Michele Dolfi <dol@zurich.ibm.com>
Date: Wed, 13 Nov 2024 18:03:30 +0100
Subject: [PATCH 04/12] text_encoder README

Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
---
 .../language/text_encoder/python/README.md    | 46 +++++++++++++++++--
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/transforms/language/text_encoder/python/README.md b/transforms/language/text_encoder/python/README.md
index 4c927d1ed..fa9c54ada 100644
--- a/transforms/language/text_encoder/python/README.md
+++ b/transforms/language/text_encoder/python/README.md
@@ -1,14 +1,36 @@
 # Text Encoder Transform

-## Summary
+Please see the set of
+[transform project conventions](../../../README.md#transform-project-conventions)
+for details on general project conventions, transform configuration,
+testing and IDE set up.
+
+## Contributors
+
+- Michele Dolfi (dol@zurich.ibm.com)
+
+## Description
+
 This transform uses [sentence encoder models](https://en.wikipedia.org/wiki/Sentence_embedding) to create embedding vectors of the text in each row of the input .parquet table.

 The embedding vectors generated by the transform are useful for tasks like sentence similarity and feature extraction, which are also at the core of retrieval-augmented generation (RAG) applications.

+### Input
+
+| input column name | data type | description |
+|-|-|-|
+| the one specified in _content_column_name_ configuration | string | the content used in this transform |
+
+
+### Output columns
+
+| output column name | data type | description |
+|-|-|-|
+| the one specified in _output_embeddings_column_name_ configuration | `array[float]` | the embedding vectors of the content |

-## Running
-### Parameters
+## Configuration

 The transform can be tuned with the following parameters.

 | `model_name` | `BAAI/bge-small-en-v1.5` | The HF model to use for encoding the text. |
 | `content_column_name` | `contents` | Name of the column containing the text to be encoded. |
 | `output_embeddings_column_name` | `embeddings` | Column name to store the embeddings in the output table.
| -| `output_path_column_name` | `doc_path` | Column name to store the document path of the chunk in the output table. | + + +## Usage + +### Launched Command Line Options When invoking the CLI, the parameters must be set as `--text_encoder_`, e.g. `--text_encoder_column_name_key=myoutput`. @@ -43,8 +69,20 @@ ls output ``` To see results of the transform. +### Code example + +TBD (link to the notebook will be provided) + + ### Transforming data using the transform image To use the transform image to transform your data, please refer to the [running images quickstart](../../../../doc/quick-start/run-transform-image.md), substituting the name of this transform image and runtime as appropriate. + +## Testing + +Following [the testing strategy of data-processing-lib](../../../../data-processing-lib/doc/transform-testing.md) + +Currently we have: +- [Unit test](test/test_text_encoder_python.py) \ No newline at end of file From abec82359f8de5124082915e9d64993f34b60a33 Mon Sep 17 00:00:00 2001 From: Maroun Touma Date: Wed, 20 Nov 2024 13:27:21 -0500 Subject: [PATCH 05/12] Added notebook for pdf2parquet Signed-off-by: Maroun Touma --- .../language/pdf2parquet/pdf2parquet.ipynb | 212 ++++++++++++++++++ 1 file changed, 212 insertions(+) create mode 100644 transforms/language/pdf2parquet/pdf2parquet.ipynb diff --git a/transforms/language/pdf2parquet/pdf2parquet.ipynb b/transforms/language/pdf2parquet/pdf2parquet.ipynb new file mode 100644 index 000000000..1ba814170 --- /dev/null +++ b/transforms/language/pdf2parquet/pdf2parquet.ipynb @@ -0,0 +1,212 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "afd55886-5f5b-4794-838e-ef8179fb0394", + "metadata": {}, + "source": [ + "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", + "\n", + "##### **** example for transform developers working from git clone\n", + "```\n", + "make venv\n", + "source venv/bin/activate && pip install jupyterlab\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4c45c3c6-e4d7-4e61-8de6-32d61f2ce695", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "## This is here as a reference only\n", + "# Users and application developers must use the right tag for the latest from pypi\n", + "#!pip install data-prep-toolkit\n", + "#!pip install data-prep-toolkit-transforms\n", + "#!pip install data-prep-connector" + ] + }, + { + "cell_type": "markdown", + "id": "407fd4e4-265d-4ec7-bbc9-b43158f5f1f3", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "##### **** Configure the transform parameters. We will only show the use of double_precision. 
For a complete list, please refer to the Readme.md for this transform\n", + "##### \n", + "| parameter:type | Description |\n", + "| --- | --- |\n", + "| data_files_to_use: list | list of file extensions in the input folder to use for running the transform |\n", + "|pdf2parquet_double_precision: int | control precision |\n" + ] + }, + { + "cell_type": "markdown", + "id": "ebf1f782-0e61-485c-8670-81066beb734c", + "metadata": {}, + "source": [ + "##### ***** Import required Classes and modules" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c2a12abc-9460-4e45-8961-873b48a9ab19", + "metadata": {}, + "outputs": [], + "source": [ + "import ast\n", + "import os\n", + "import sys\n", + "\n", + "from data_processing.runtime.pure_python import PythonTransformLauncher\n", + "from data_processing.utils import ParamsUtils\n", + "from pdf2parquet_transform_python import Pdf2ParquetPythonTransformConfiguration\n" + ] + }, + { + "cell_type": "markdown", + "id": "7234563c-2924-4150-8a31-4aec98c1bf33", + "metadata": {}, + "source": [ + "##### ***** Setup runtime parameters for this transform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e90a853e-412f-45d7-af3d-959e755aeebb", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# create parameters\n", + "input_folder = os.path.join(\"python\", \"test-data\", \"input\")\n", + "output_folder = os.path.join( \"python\", \"output\")\n", + "local_conf = {\n", + " \"input_folder\": input_folder,\n", + " \"output_folder\": output_folder,\n", + "}\n", + "params = {\n", + " # Data access. Only required parameters are specified\n", + " \"data_local_config\": ParamsUtils.convert_to_ast(local_conf),\n", + " \"data_files_to_use\": ast.literal_eval(\"['.pdf','.docx','.pptx','.zip']\"),\n", + " # execution info\n", + " \"runtime_pipeline_id\": \"pipeline_id\",\n", + " \"runtime_job_id\": \"job_id\",\n", + " # pdf2parquet params\n", + " \"pdf2parquet_double_precision\": 0,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "7949f66a-d207-45ef-9ad7-ad9406f8d42a", + "metadata": {}, + "source": [ + "##### ***** Use python runtime to invoke the transform" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0775e400-7469-49a6-8998-bd4772931459", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "13:23:55 INFO - pdf2parquet parameters are : {'batch_size': -1, 'artifacts_path': None, 'contents_type': , 'do_table_structure': True, 'do_ocr': True, 'ocr_engine': , 'bitmap_area_threshold': 0.05, 'pdf_backend': , 'double_precision': 0}\n", + "13:23:55 INFO - pipeline id pipeline_id\n", + "13:23:55 INFO - code location None\n", + "13:23:55 INFO - data factory data_ is using local data access: input_folder - python/test-data/input output_folder - python/output\n", + "13:23:55 INFO - data factory data_ max_files -1, n_sample -1\n", + "13:23:55 INFO - data factory data_ Not using data sets, checkpointing False, max files -1, random samples -1, files to use ['.pdf', '.docx', '.pptx', '.zip'], files to checkpoint ['.parquet']\n", + "13:23:55 INFO - orchestrator pdf2parquet started at 2024-11-20 13:23:55\n", + "13:23:55 INFO - Number of files is 2, source profile {'max_file_size': 0.3013172149658203, 'min_file_size': 0.2757863998413086, 'total_file_size': 0.5771036148071289}\n", + "13:23:55 INFO - Initializing models\n", + "13:23:58 INFO - Processing archive_doc_filename='2305.03393v1-pg9.pdf' \n", + "13:23:59 INFO - Processing 
archive_doc_filename='2408.09869v1-pg1.pdf' \n", + "13:24:00 INFO - Completed 1 files (50.0%) in 0.029 min\n", + "13:24:03 INFO - Completed 2 files (100.0%) in 0.08 min\n", + "13:24:03 INFO - Done processing 2 files, waiting for flush() completion.\n", + "13:24:03 INFO - done flushing in 0.0 sec\n", + "13:24:03 INFO - Completed execution in 0.132 min, execution result 0\n" + ] + } + ], + "source": [ + "%%capture\n", + "sys.argv = ParamsUtils.dict_to_req(d=params)\n", + "launcher = PythonTransformLauncher(runtime_config=Pdf2ParquetPythonTransformConfiguration())\n", + "launcher.launch()\n" + ] + }, + { + "cell_type": "markdown", + "id": "c3df5adf-4717-4a03-864d-9151cd3f134b", + "metadata": {}, + "source": [ + "##### **** The specified folder will include the transformed parquet files." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7276fe84-6512-4605-ab65-747351e13a7c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['python/output/redp5110-ch1.parquet',\n", + " 'python/output/metadata.json',\n", + " 'python/output/archive1.parquet']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import glob\n", + "glob.glob(\"python/output/*\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fef6667e-71ed-4054-9382-55c6bb3fda70", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 956e5a075ebae406a0d2123aa18589cbf1a4bc62 Mon Sep 17 00:00:00 2001 From: Shahrokh Daijavad Date: Wed, 20 Nov 2024 11:14:05 -0800 Subject: [PATCH 06/12] Update pdf2parquet.ipynb Made a few changes to the comment cells that explain the execution of the immediate next cell --- transforms/language/pdf2parquet/pdf2parquet.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/transforms/language/pdf2parquet/pdf2parquet.ipynb b/transforms/language/pdf2parquet/pdf2parquet.ipynb index 1ba814170..87e58c7b6 100644 --- a/transforms/language/pdf2parquet/pdf2parquet.ipynb +++ b/transforms/language/pdf2parquet/pdf2parquet.ipynb @@ -5,13 +5,13 @@ "id": "afd55886-5f5b-4794-838e-ef8179fb0394", "metadata": {}, "source": [ - "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", - "\n", - "##### **** example for transform developers working from git clone\n", + "##### **** Example for transform developers working from git clone\n", "```\n", "make venv\n", "source venv/bin/activate && pip install jupyterlab\n", "```" + "##### **** The pip installs below need to be adapted to use the appropriate release level. Alternatively, the venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", + "\n", ] }, { @@ -36,7 +36,7 @@ "jp-MarkdownHeadingCollapsed": true }, "source": [ - "##### **** Configure the transform parameters. We will only show the use of double_precision. 
For a complete list, please refer to the Readme.md for this transform\n", + "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the README.md for this transform\n", "##### \n", "| parameter:type | Description |\n", "| --- | --- |\n", @@ -49,7 +49,7 @@ "id": "ebf1f782-0e61-485c-8670-81066beb734c", "metadata": {}, "source": [ - "##### ***** Import required Classes and modules" + "##### ***** Import required classes and modules" ] }, { From f776f1d39c0fd28df4f709f1a753b0ab9e34f50e Mon Sep 17 00:00:00 2001 From: Shahrokh Daijavad Date: Wed, 20 Nov 2024 11:27:39 -0800 Subject: [PATCH 07/12] Update pdf2parquet.ipynb Restored to a valid Notebook --- transforms/language/pdf2parquet/pdf2parquet.ipynb | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/transforms/language/pdf2parquet/pdf2parquet.ipynb b/transforms/language/pdf2parquet/pdf2parquet.ipynb index 87e58c7b6..1200e7a7f 100644 --- a/transforms/language/pdf2parquet/pdf2parquet.ipynb +++ b/transforms/language/pdf2parquet/pdf2parquet.ipynb @@ -5,13 +5,14 @@ "id": "afd55886-5f5b-4794-838e-ef8179fb0394", "metadata": {}, "source": [ - "##### **** Example for transform developers working from git clone\n", + "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", + "\n", + "##### **** example: \n", "```\n", - "make venv\n", - "source venv/bin/activate && pip install jupyterlab\n", + "python -m venv && source venv/bin/activate\n", + "pip install -r requirements.txt\n", + "pip install jupyterlab\n", "```" - "##### **** The pip installs below need to be adapted to use the appropriate release level. Alternatively, the venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", - "\n", ] }, { From 13dabd02ad2b95731d8f37535123bbc852277316 Mon Sep 17 00:00:00 2001 From: Maroun Touma Date: Wed, 20 Nov 2024 15:20:52 -0500 Subject: [PATCH 08/12] Added doc chunk minimal notebook Signed-off-by: Maroun Touma --- transforms/language/doc_chunk/doc_chunk.ipynb | 194 ++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 transforms/language/doc_chunk/doc_chunk.ipynb diff --git a/transforms/language/doc_chunk/doc_chunk.ipynb b/transforms/language/doc_chunk/doc_chunk.ipynb new file mode 100644 index 000000000..822d5b302 --- /dev/null +++ b/transforms/language/doc_chunk/doc_chunk.ipynb @@ -0,0 +1,194 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "afd55886-5f5b-4794-838e-ef8179fb0394", + "metadata": {}, + "source": [ + "##### **** These pip install need to be adapted to use the appropriate release level. 
Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", + "\n", + "##### **** example for transform developers working from git clone\n", + "```\n", + "make venv\n", + "source venv/bin/activate && pip install jupyterlab\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4c45c3c6-e4d7-4e61-8de6-32d61f2ce695", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "## This is here as a reference only\n", + "# Users and application developers must use the right tag for the latest from pypi\n", + "#!pip install data-prep-toolkit\n", + "#!pip install data-prep-toolkit-transforms\n", + "#!pip install data-prep-connector" + ] + }, + { + "cell_type": "markdown", + "id": "407fd4e4-265d-4ec7-bbc9-b43158f5f1f3", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the Readme.md for this transform\n", + "##### \n", + "| parameter:type | value | Description |\n", + "| --- | --- | --- |\n", + "|data_files_to_use: list | .parquet | Process all parquet files in the input folder |\n", + "| doc_chunk_chunking_type: str | dl_json | |\n" + ] + }, + { + "cell_type": "markdown", + "id": "ebf1f782-0e61-485c-8670-81066beb734c", + "metadata": {}, + "source": [ + "##### ***** Import required Classes and modules" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c2a12abc-9460-4e45-8961-873b48a9ab19", + "metadata": {}, + "outputs": [], + "source": [ + "import ast\n", + "import os\n", + "import sys\n", + "\n", + "from data_processing.runtime.pure_python import PythonTransformLauncher\n", + "from data_processing.utils import ParamsUtils\n", + "from doc_chunk_transform_python import DocChunkPythonTransformConfiguration\n" + ] + }, + { + "cell_type": "markdown", + "id": "7234563c-2924-4150-8a31-4aec98c1bf33", + "metadata": {}, + "source": [ + "##### ***** Setup runtime parameters for this transform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e90a853e-412f-45d7-af3d-959e755aeebb", + "metadata": {}, + "outputs": [], + "source": [ + "# create parameters\n", + "input_folder = os.path.join(\"python\", \"test-data\", \"input\")\n", + "output_folder = os.path.join( \"python\", \"output\")\n", + "local_conf = {\n", + " \"input_folder\": input_folder,\n", + " \"output_folder\": output_folder,\n", + "}\n", + "params = {\n", + " \"data_local_config\": ParamsUtils.convert_to_ast(local_conf),\n", + " \"data_files_to_use\": ast.literal_eval(\"['.parquet']\"),\n", + " \"runtime_pipeline_id\": \"pipeline_id\",\n", + " \"runtime_job_id\": \"job_id\",\n", + " \"doc_chunk_chunking_type\": \"dl_json\",\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "7949f66a-d207-45ef-9ad7-ad9406f8d42a", + "metadata": {}, + "source": [ + "##### ***** Use python runtime to invoke the transform" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0775e400-7469-49a6-8998-bd4772931459", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "15:19:48 INFO - pipeline id pipeline_id\n", + "15:19:48 INFO - code location None\n", + "15:19:48 INFO - data factory data_ is using local data access: input_folder - python/test-data/input output_folder - python/output\n", + "15:19:48 INFO - data factory data_ max_files -1, n_sample -1\n", + "15:19:48 INFO - data factory data_ Not using data 
sets, checkpointing False, max files -1, random samples -1, files to use ['.parquet'], files to checkpoint ['.parquet']\n", + "15:19:48 INFO - orchestrator doc_chunk started at 2024-11-20 15:19:48\n", + "15:19:48 INFO - Number of files is 1, source profile {'max_file_size': 0.011513710021972656, 'min_file_size': 0.011513710021972656, 'total_file_size': 0.011513710021972656}\n", + "15:19:48 INFO - Completed 1 files (100.0%) in 0.001 min\n", + "15:19:48 INFO - Done processing 1 files, waiting for flush() completion.\n", + "15:19:48 INFO - done flushing in 0.0 sec\n", + "15:19:48 INFO - Completed execution in 0.001 min, execution result 0\n" + ] + } + ], + "source": [ + "%%capture\n", + "sys.argv = ParamsUtils.dict_to_req(d=params)\n", + "launcher = PythonTransformLauncher(runtime_config=DocChunkPythonTransformConfiguration())\n", + "launcher.launch()\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "c3df5adf-4717-4a03-864d-9151cd3f134b", + "metadata": {}, + "source": [ + "##### **** The specified folder will include the transformed parquet files." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7276fe84-6512-4605-ab65-747351e13a7c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['python/output/metadata.json', 'python/output/test1.parquet']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import glob\n", + "glob.glob(\"python/output/*\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 0ddcca103d945bd1389dc1e89080093b4c3f0e0a Mon Sep 17 00:00:00 2001 From: Maroun Touma Date: Wed, 20 Nov 2024 15:46:26 -0500 Subject: [PATCH 09/12] minimal sample notebook for how transform can be invoked Signed-off-by: Maroun Touma --- .../language/text_encoder/text_encoder.ipynb | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 transforms/language/text_encoder/text_encoder.ipynb diff --git a/transforms/language/text_encoder/text_encoder.ipynb b/transforms/language/text_encoder/text_encoder.ipynb new file mode 100644 index 000000000..4adff9edf --- /dev/null +++ b/transforms/language/text_encoder/text_encoder.ipynb @@ -0,0 +1,191 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "afd55886-5f5b-4794-838e-ef8179fb0394", + "metadata": {}, + "source": [ + "##### **** These pip install need to be adapted to use the appropriate release level. 
Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", + "\n", + "##### **** example: \n", + "```\n", + "python -m venv && source venv/bin/activate\n", + "pip install -r requirements.txt\n", + "pip install jupyterlab\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4c45c3c6-e4d7-4e61-8de6-32d61f2ce695", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "## This is here as a reference only\n", + "# Users and application developers must use the right tag for the latest from pypi\n", + "#!pip install data-prep-toolkit\n", + "#!pip install data-prep-toolkit-transforms\n", + "#!pip install data-prep-connector" + ] + }, + { + "cell_type": "markdown", + "id": "407fd4e4-265d-4ec7-bbc9-b43158f5f1f3", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the README.md for this transform\n", + "##### \n", + "| parameter:type | Description |\n", + "| --- | --- |\n", + "| data_files_to_use: list | list of file extensions in the input folder to use for running the transform |\n", + "|pdf2parquet_double_precision: int | control precision |\n" + ] + }, + { + "cell_type": "markdown", + "id": "ebf1f782-0e61-485c-8670-81066beb734c", + "metadata": {}, + "source": [ + "##### ***** Import required classes and modules" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c2a12abc-9460-4e45-8961-873b48a9ab19", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "\n", + "from data_processing.runtime.pure_python import PythonTransformLauncher\n", + "from data_processing.utils import ParamsUtils\n", + "from text_encoder_transform_python import TextEncoderPythonTransformConfiguration\n" + ] + }, + { + "cell_type": "markdown", + "id": "7234563c-2924-4150-8a31-4aec98c1bf33", + "metadata": {}, + "source": [ + "##### ***** Setup runtime parameters for this transform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e90a853e-412f-45d7-af3d-959e755aeebb", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "input_folder = os.path.join (\"python\", \"test-data\", \"input\")\n", + "output_folder = os.path.join( \"python\", \"output\")\n", + "local_conf = {\n", + " \"input_folder\": input_folder,\n", + " \"output_folder\": output_folder,\n", + "}\n", + "params = {\n", + " \"data_local_config\": ParamsUtils.convert_to_ast(local_conf),\n", + " \"runtime_pipeline_id\": \"pipeline_id\",\n", + " \"runtime_job_id\": \"job_id\",\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "7949f66a-d207-45ef-9ad7-ad9406f8d42a", + "metadata": {}, + "source": [ + "##### ***** Use python runtime to invoke the transform" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0775e400-7469-49a6-8998-bd4772931459", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "15:44:57 INFO - pipeline id pipeline_id\n", + "15:44:57 INFO - code location None\n", + "15:44:57 INFO - data factory data_ is using local data access: input_folder - python/test-data/input output_folder - python/output\n", + "15:44:57 INFO - data factory data_ max_files -1, n_sample -1\n", + "15:44:57 INFO - data factory data_ Not using data sets, checkpointing False, max files -1, random samples -1, files to use ['.parquet'], files to checkpoint ['.parquet']\n", + 
"15:44:57 INFO - orchestrator text_encoder started at 2024-11-20 15:44:57\n", + "15:44:57 INFO - Number of files is 1, source profile {'max_file_size': 0.0010089874267578125, 'min_file_size': 0.0010089874267578125, 'total_file_size': 0.0010089874267578125}\n", + "15:44:58 INFO - Completed 1 files (100.0%) in 0.003 min\n", + "15:44:58 INFO - Done processing 1 files, waiting for flush() completion.\n", + "15:44:58 INFO - done flushing in 0.0 sec\n", + "15:44:58 INFO - Completed execution in 0.017 min, execution result 0\n" + ] + } + ], + "source": [ + "%%capture\n", + "sys.argv = ParamsUtils.dict_to_req(d=params)\n", + "launcher = PythonTransformLauncher(runtime_config=TextEncoderPythonTransformConfiguration())\n", + "launcher.launch()\n" + ] + }, + { + "cell_type": "markdown", + "id": "c3df5adf-4717-4a03-864d-9151cd3f134b", + "metadata": {}, + "source": [ + "##### **** The specified folder will include the transformed parquet files." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7276fe84-6512-4605-ab65-747351e13a7c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['python/output/metadata.json', 'python/output/test1.parquet']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import glob\n", + "glob.glob(\"python/output/*\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 11ed291c705d6e87b9fd1b1e892ff5d05e1a7a00 Mon Sep 17 00:00:00 2001 From: SHAHROKH DAIJAVAD Date: Wed, 20 Nov 2024 15:24:51 -0800 Subject: [PATCH 10/12] restoring the make venv Signed-off-by: SHAHROKH DAIJAVAD --- .../language/pdf2parquet/pdf2parquet.ipynb | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/transforms/language/pdf2parquet/pdf2parquet.ipynb b/transforms/language/pdf2parquet/pdf2parquet.ipynb index 1200e7a7f..e5548eb4c 100644 --- a/transforms/language/pdf2parquet/pdf2parquet.ipynb +++ b/transforms/language/pdf2parquet/pdf2parquet.ipynb @@ -9,8 +9,8 @@ "\n", "##### **** example: \n", "```\n", - "python -m venv && source venv/bin/activate\n", - "pip install -r requirements.txt\n", + "make venv \n", + "source venv/bin/activate \n", "pip install jupyterlab\n", "```" ] @@ -122,22 +122,22 @@ "name": "stderr", "output_type": "stream", "text": [ - "13:23:55 INFO - pdf2parquet parameters are : {'batch_size': -1, 'artifacts_path': None, 'contents_type': , 'do_table_structure': True, 'do_ocr': True, 'ocr_engine': , 'bitmap_area_threshold': 0.05, 'pdf_backend': , 'double_precision': 0}\n", - "13:23:55 INFO - pipeline id pipeline_id\n", - "13:23:55 INFO - code location None\n", - "13:23:55 INFO - data factory data_ is using local data access: input_folder - python/test-data/input output_folder - python/output\n", - "13:23:55 INFO - data factory data_ max_files -1, n_sample -1\n", - "13:23:55 INFO - data factory data_ Not using data sets, checkpointing False, max files -1, random samples -1, files to use ['.pdf', '.docx', '.pptx', '.zip'], files to checkpoint ['.parquet']\n", - "13:23:55 INFO - orchestrator pdf2parquet started at 2024-11-20 13:23:55\n", - "13:23:55 INFO - Number of files is 2, 
source profile {'max_file_size': 0.3013172149658203, 'min_file_size': 0.2757863998413086, 'total_file_size': 0.5771036148071289}\n", - "13:23:55 INFO - Initializing models\n", - "13:23:58 INFO - Processing archive_doc_filename='2305.03393v1-pg9.pdf' \n", - "13:23:59 INFO - Processing archive_doc_filename='2408.09869v1-pg1.pdf' \n", - "13:24:00 INFO - Completed 1 files (50.0%) in 0.029 min\n", - "13:24:03 INFO - Completed 2 files (100.0%) in 0.08 min\n", - "13:24:03 INFO - Done processing 2 files, waiting for flush() completion.\n", - "13:24:03 INFO - done flushing in 0.0 sec\n", - "13:24:03 INFO - Completed execution in 0.132 min, execution result 0\n" + "15:13:18 INFO - pdf2parquet parameters are : {'batch_size': -1, 'artifacts_path': None, 'contents_type': , 'do_table_structure': True, 'do_ocr': True, 'ocr_engine': , 'bitmap_area_threshold': 0.05, 'pdf_backend': , 'double_precision': 0}\n", + "15:13:18 INFO - pipeline id pipeline_id\n", + "15:13:18 INFO - code location None\n", + "15:13:18 INFO - data factory data_ is using local data access: input_folder - python/test-data/input output_folder - python/output\n", + "15:13:18 INFO - data factory data_ max_files -1, n_sample -1\n", + "15:13:18 INFO - data factory data_ Not using data sets, checkpointing False, max files -1, random samples -1, files to use ['.pdf', '.docx', '.pptx', '.zip'], files to checkpoint ['.parquet']\n", + "15:13:18 INFO - orchestrator pdf2parquet started at 2024-11-20 15:13:18\n", + "15:13:18 INFO - Number of files is 2, source profile {'max_file_size': 0.3013172149658203, 'min_file_size': 0.2757863998413086, 'total_file_size': 0.5771036148071289}\n", + "15:13:18 INFO - Initializing models\n", + "15:14:08 INFO - Processing archive_doc_filename='2305.03393v1-pg9.pdf' \n", + "15:14:09 INFO - Processing archive_doc_filename='2408.09869v1-pg1.pdf' \n", + "15:14:10 INFO - Completed 1 files (50.0%) in 0.04 min\n", + "15:14:18 INFO - Completed 2 files (100.0%) in 0.179 min\n", + "15:14:18 INFO - Done processing 2 files, waiting for flush() completion.\n", + "15:14:18 INFO - done flushing in 0.0 sec\n", + "15:14:18 INFO - Completed execution in 1.007 min, execution result 0\n" ] } ], @@ -205,7 +205,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.10.8" } }, "nbformat": 4, From a7e8d318d77196499098a100563cccbc26b92d57 Mon Sep 17 00:00:00 2001 From: SHAHROKH DAIJAVAD Date: Wed, 20 Nov 2024 15:58:01 -0800 Subject: [PATCH 11/12] unification of notebooks Signed-off-by: SHAHROKH DAIJAVAD --- transforms/language/doc_chunk/doc_chunk.ipynb | 6 ++---- transforms/language/pdf2parquet/pdf2parquet.ipynb | 9 ++++----- .../language/text_encoder/text_encoder.ipynb | 15 ++++----------- 3 files changed, 10 insertions(+), 20 deletions(-) diff --git a/transforms/language/doc_chunk/doc_chunk.ipynb b/transforms/language/doc_chunk/doc_chunk.ipynb index 822d5b302..3a8466037 100644 --- a/transforms/language/doc_chunk/doc_chunk.ipynb +++ b/transforms/language/doc_chunk/doc_chunk.ipynb @@ -5,9 +5,7 @@ "id": "afd55886-5f5b-4794-838e-ef8179fb0394", "metadata": {}, "source": [ - "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", - "\n", - "##### **** example for transform developers working from git clone\n", + "##### **** These pip installs need to be adapted to use the appropriate release level. 
Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release. Example for transform developers working from git clone:\n", "```\n", "make venv\n", "source venv/bin/activate && pip install jupyterlab\n", @@ -36,7 +34,7 @@ "jp-MarkdownHeadingCollapsed": true }, "source": [ - "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the Readme.md for this transform\n", + "##### **** Configure the transform parameters. We will only show the use of data_files_to_use and doc_chunk_chunking_type. For a complete list of parameters, please refer to the README.md for this transform\n", "##### \n", "| parameter:type | value | Description |\n", "| --- | --- | --- |\n", diff --git a/transforms/language/pdf2parquet/pdf2parquet.ipynb b/transforms/language/pdf2parquet/pdf2parquet.ipynb index e5548eb4c..2d26741b3 100644 --- a/transforms/language/pdf2parquet/pdf2parquet.ipynb +++ b/transforms/language/pdf2parquet/pdf2parquet.ipynb @@ -5,9 +5,7 @@ "id": "afd55886-5f5b-4794-838e-ef8179fb0394", "metadata": {}, "source": [ - "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", - "\n", - "##### **** example: \n", + "##### **** These pip installs need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release. Example for transform developers working from git clone:\n", "```\n", "make venv \n", "source venv/bin/activate \n", @@ -37,12 +35,13 @@ "jp-MarkdownHeadingCollapsed": true }, "source": [ - "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the README.md for this transform\n", + "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the README.md for this transform.\n", "##### \n", "| parameter:type | Description |\n", "| --- | --- |\n", "| data_files_to_use: list | list of file extensions in the input folder to use for running the transform |\n", - "|pdf2parquet_double_precision: int | control precision |\n" + "|pdf2parquet_double_precision: int | If set, all floating points (e.g. bounding boxes) are rounded to this precision. For tests it is advised to use 0 |\n", + "\n" ] }, { diff --git a/transforms/language/text_encoder/text_encoder.ipynb b/transforms/language/text_encoder/text_encoder.ipynb index 4adff9edf..aca309594 100644 --- a/transforms/language/text_encoder/text_encoder.ipynb +++ b/transforms/language/text_encoder/text_encoder.ipynb @@ -5,12 +5,10 @@ "id": "afd55886-5f5b-4794-838e-ef8179fb0394", "metadata": {}, "source": [ - "##### **** These pip install need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release\n", - "\n", - "##### **** example: \n", + "##### **** These pip installs need to be adapted to use the appropriate release level. Alternatively, The venv running the jupyter lab could be pre-configured with a requirement file that includes the right release. 
Example for transform developers working from git clone:\n", "```\n", - "python -m venv && source venv/bin/activate\n", - "pip install -r requirements.txt\n", + "make venv \n", + "source venv/bin/activate \n", "pip install jupyterlab\n", "```" ] @@ -37,12 +35,7 @@ "jp-MarkdownHeadingCollapsed": true }, "source": [ - "##### **** Configure the transform parameters. We will only show the use of double_precision. For a complete list, please refer to the README.md for this transform\n", - "##### \n", - "| parameter:type | Description |\n", - "| --- | --- |\n", - "| data_files_to_use: list | list of file extensions in the input folder to use for running the transform |\n", - "|pdf2parquet_double_precision: int | control precision |\n" + "##### **** Configure the transform parameters. For this notebook, we use all the default parameters. For a complete list of parameters, please refer to the README.md for this transform.\n" ] }, { From 3c6466a61ebdce557c92136ce6f5a1aa1790629b Mon Sep 17 00:00:00 2001 From: Maroun Touma Date: Fri, 22 Nov 2024 18:12:50 -0500 Subject: [PATCH 12/12] added constraint for pydantic to prevent llama-index-core from picking up 2.10 Signed-off-by: Maroun Touma --- transforms/language/doc_chunk/python/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/transforms/language/doc_chunk/python/requirements.txt b/transforms/language/doc_chunk/python/requirements.txt index dd076d0e0..c24d0c3e2 100644 --- a/transforms/language/doc_chunk/python/requirements.txt +++ b/transforms/language/doc_chunk/python/requirements.txt @@ -1,3 +1,4 @@ data-prep-toolkit==0.2.2.dev2 docling-core==2.3.0 +pydantic>=2.0.0,<2.10.0 llama-index-core>=0.11.22,<0.12.0