Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/improve speed #55

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 2 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,16 +78,12 @@
- Easy to use
- Easy to deploy

## 💻 Screenshots
## 💻 Demo


<p align="center">
<a href="https://prometh.ai">
<img src="assets/img_5.png" alt="Description1" width="15%" height="50%" />
<img src="assets/img_1.png" alt="Description2" width="15%" height="50%" />
<img src="assets/img_2.png" alt="Description3" width="15%" height="50%" />
<img src="assets/img_3.png" alt="Description4" width="15%" height="50%" />
<img src="assets/img_4.png" alt="Description4" width="15%" height="50%" />
<img src="https://promethai-public-assets.s3.eu-west-1.amazonaws.com/product_demo-min.gif" alt="AI assistant" width="25%" height="50%"/>
</a>
</p>

Expand Down
4 changes: 3 additions & 1 deletion examples/level_2/.env.template
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,6 @@ OPENAI_TEMPERATURE=0.0
REDIS_HOST=redis
ZAPIER_NLA_API_KEY
LOCAL_DEV = False
REPLICATE_API_TOKEN
REPLICATE_API_TOKEN
WEAVIATE_URL
WEAVIATE_API_KEY
14 changes: 12 additions & 2 deletions examples/level_2/Readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,5 +30,15 @@ The FastAPI endpoint accepts prompts and PDF files and returns a JSON object with the result.
-X POST
-F "prompt=The quick brown fox"
-F "file=@/path/to/file.pdf"
http://localhost:8000/generate/
```
http://localhost:8000/upload/
```

{
"payload": {
"user_id": "681",
"session_id": "471",
"model_speed": "slow",
"prompt": "Temperature=Cold;Food Type=Ice Cream",
"pdf_url": "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
}
}
98 changes: 77 additions & 21 deletions examples/level_2/api.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from langchain.document_loaders import PyPDFLoader

from level_2_pdf_vectorstore__dlt_contracts import ShortTermMemory
from level_2_pdf_vectorstore__dlt_contracts import Memory
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
Expand All @@ -15,8 +15,7 @@
from fastapi import HTTPException
from fastapi import FastAPI, UploadFile, File
from typing import List
from level_2_pdf_vectorstore__dlt_contracts import ShortTermMemory

import requests
# Set up logging
logging.basicConfig(
level=logging.INFO, # Set the logging level (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL)
Expand All @@ -36,8 +35,6 @@
from fastapi import Depends


class Payload(BaseModel):
payload: str
class ImageResponse(BaseModel):
success: bool
message: str
Expand Down Expand Up @@ -66,39 +63,98 @@ def health_check():

#curl -X POST -H "Content-Type: application/json" -d '{"data": "YourPayload"}' -F "files=@/path/to/your/pdf/file.pdf" http://127.0.0.1:8000/upload/

@app.post("/upload/")
from fastapi import FastAPI, UploadFile, File
import requests
import os
import json

from io import BytesIO

# NOTE(review): the previous revision re-ran `app = FastAPI()` at this point
# in the module. That replaces the application instance created at the top of
# the file and silently drops every route registered before this line (e.g.
# the /health endpoint). The module must keep using the single app created
# earlier, so the duplicate instantiation is removed.


class Payload(BaseModel):
    """Request body for POST /upload/.

    `payload` is an arbitrary JSON object. Per the example in
    examples/level_2/Readme.md it is expected to carry `user_id`,
    `session_id`, `model_speed`, `prompt`, and optionally `pdf_url`
    — TODO confirm against callers.
    """
    payload: Dict[str, Any]

@app.post("/upload/", response_model=dict)
async def upload_pdf_and_payload(
    payload: Payload,
    # files: List[UploadFile] = File(...),
):
    """Fetch the PDF referenced by the payload and run the memory buffer.

    Expects ``payload.payload`` to carry at least ``pdf_url`` (and,
    presumably, ``user_id``/``prompt`` per the Readme example — verify
    against callers).

    Returns:
        JSONResponse ``{"response": ...}`` on success, or
        ``{"error": ...}`` when processing fails.

    Raises:
        HTTPException: 400 when the payload has no ``pdf_url``. The
        previous revision fell through and implicitly returned ``None``
        (an empty 200) in that case.
    """
    decoded_payload = payload.payload

    # Guard clause: make the missing-URL case an explicit client error.
    if 'pdf_url' not in decoded_payload:
        raise HTTPException(status_code=400, detail="payload must include 'pdf_url'")

    try:
        # Download the remote PDF; fail fast on HTTP errors instead of
        # feeding an error page to the PDF parser.
        pdf_response = requests.get(decoded_payload['pdf_url'])
        pdf_response.raise_for_status()
        logging.info("Downloaded PDF from URL")

        # PyPDFLoader needs a path on disk, so spool the bytes to /tmp.
        # (The previous revision round-tripped the bytes through a BytesIO
        # only to .read() them straight back — write the content directly.)
        tmp_location = os.path.join('/tmp', "tmp.pdf")
        with open(tmp_location, 'wb') as tmp_file:
            tmp_file.write(pdf_response.content)
        logging.info("Wrote PDF from URL")

        # Split the document into pages.
        # NOTE(review): `pages` is not consumed yet — the old ShortTermMemory
        # path that used it was removed; kept pending re-integration.
        loader = PyPDFLoader(tmp_location)
        pages = loader.load_and_split()
        logging.info(" PDF split into pages")

        # Use the caller's user_id; the previous revision hard-coded '555'
        # even though the payload carries one ('555' kept as the fallback).
        Memory_ = Memory(index_name="my-agent",
                         user_id=decoded_payload.get('user_id', '555'))
        await Memory_.async_init()

        # Run the buffer exactly once — the previous revision invoked
        # _run_buffer twice and discarded the first result.
        response = Memory_._run_buffer(user_input="I want to get a schema for my data")
        return JSONResponse(content={"response": response}, status_code=200)

    except Exception as e:
        # Best-effort error contract kept from the original: failures
        # surface as {"error": ...} rather than an unhandled 500 — but log
        # the traceback instead of swallowing it silently.
        logging.exception("upload_pdf_and_payload failed")
        return {"error": str(e)}


# @app.post("/clear-cache", response_model=dict)
Expand Down
Loading