# main.py

from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import io
import os
from groq import Groq
import dotenv
import threading
import base64
import cv2
from openai import OpenAI
import json
import uvicorn
from typing import List

dotenv.load_dotenv()

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# --- audio_transcription.py ---

# Initialize the Groq client
client = Groq(api_key=os.environ["GROQ_API_KEY"])


async def transcribe_audio_stream(audio_chunk):
    """Transcribe a single audio chunk with Groq's hosted Whisper model."""
    try:
        transcription = client.audio.transcriptions.create(
            file=audio_chunk,
            model="distil-whisper-large-v3-en",
            response_format="text",
            language="en",
        )
        return transcription
    except Exception as e:
        print(f"Error in transcription: {e}")
        return ""
def summarize(
    old_summary: str, text_chunks: List[str], conciseness_delta: int = 0
) -> str:
    """Append a heading + paragraph summarizing `text_chunks` to `old_summary`."""
    conciseness_delta = int(conciseness_delta)
    if conciseness_delta == 0:
        change_conciseness = ""
    else:
        delta = "more" if conciseness_delta < 0 else "less"
        change_conciseness = f"Make this new text passage {delta} detailed."

    completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": """
You will be given a text to summarize. Follow these steps:
1. Output a single heading and paragraph; together these should summarize the information in the provided text.
    a. Write an informative heading that clearly indicates the main topic or key idea of the provided text.
    b. Write a concise paragraph summarizing the main points of the new text.
2. Format the entire summary as a Markdown document:
    - Use only H2 (##) for headings. No other heading levels are allowed.
    - Use regular text for paragraphs.
    - Always follow a heading with a paragraph.
    - Separate each heading and paragraph with a blank line.
3. Ensure each heading is detailed and informative, providing a clear idea of the paragraph's content without needing to read it.
4. Keep each paragraph concise and focused on the main points of the text.
5. Your response should contain only the new or updated summary point, formatted in Markdown as described above. Do not include any additional explanations or comments.""",
            },
            {
                "role": "user",
                # Include the conciseness instruction when the caller asked for
                # a more or less detailed summary of this chunk.
                "content": f"""{change_conciseness}
text: {' '.join(text_chunks)}""",
            },
        ],
        model="llama3-8b-8192",
        max_tokens=5000,  # Maximum token output size for the summary
    )
    content = completion.choices[0].message.content
    content = old_summary + content
    print("CONTENT", content)
    return content
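
# Sketch of the incremental-summary contract (hypothetical values):
#   curr = summarize("", ["Neural nets are...", "Backprop works by..."])
#   curr = summarize(curr, ["Later in the lecture..."], conciseness_delta=-1)
# Each call appends one new "## heading + paragraph" block to the running summary;
# a negative delta asks for more detail, a positive one for less.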
# Shared state for the streaming transcription/summarization endpoints
texts = []  # All transcribed chunks so far
last_seen = 0  # Index of the first chunk not yet folded into the summary
curr_summary = ""


@app.post("/api/transcribe")
async def upload_audio(file: UploadFile = File(...)):
    # TODO: segment the incoming audio
    audio_data = await file.read()
    audio_io = io.BytesIO(audio_data)
    audio_io.name = "audio.wav"  # The Groq API requires a filename
    transcription = await transcribe_audio_stream(audio_io)
    texts.append(transcription)
    print(texts)
    return JSONResponse(content={"transcription": transcription})
## -- summarization --
@app.get("/api/summarize")
async def summarize_audio(conciseness_delta: int = 0):
    # e.g.: /api/summarize?conciseness_delta=0
    global texts
    global curr_summary
    global last_seen
    if len(texts) == 0:
        return JSONResponse(content={"summary": "No transcriptions available"})
    curr_summary = summarize(curr_summary, texts[last_seen:], conciseness_delta)
    last_seen = len(texts)
    return JSONResponse(content={"summary": curr_summary})
## -- ask a question --
@app.post("/api/ask")
async def ask_question(request: dict):
    print(request)
    context, question, history = (
        request["context"],
        request["question"],
        request["questionHistory"],
    )
    res = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": f"You are given the following context: '{context}' and the user is giving you inputs based on this context. The conversation, starting from the user and alternating with the system, has been: '{history}'.",
            },
            {"role": "user", "content": question},
        ],
        model="llama3-8b-8192",
    )
    return JSONResponse(content={"response": res.choices[0].message.content})
# --- facedetection.py ---

# Load pre-trained Haar cascades for face and eye detection
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")

# Global variable to store the latest result
latest_result = False


def are_eyes_visible(img):
    """Return True if any detected face in `img` has at least one visible eye."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for x, y, w, h in faces:
        roi_gray = gray[y : y + h, x : x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        if len(eyes) > 0:
            return True
    return False


def face_detection_loop():
    global latest_result
    cap = cv2.VideoCapture(0)  # Use default camera
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        latest_result = are_eyes_visible(frame)
        print(f"LOOKING: {latest_result}")
    cap.release()


@app.get("/face-detection")
def get_latest_result():
    return JSONResponse(content={"res": latest_result})
# Start the face-detection loop on a background thread
camera_thread = threading.Thread(target=face_detection_loop)
camera_thread.daemon = True  # Daemon thread: closes when the main program exits
camera_thread.start()
print("STARTED")

# ----- End of facedetection.py -----
# Latest gesture-recognition result, polled by /gesture-recognition
latest_result_2 = {
    "handsPrayer": False,
    "thumbsUp": False,
    "fist": False,
    "stopSign": False,
}


# Encode a frame as a base64 JPEG string
def encode_image(image_array):
    _, buffer = cv2.imencode(".jpg", image_array)
    return base64.b64encode(buffer).decode("utf-8")
def capture_and_query_chatgpt(
    prompt, image_base64, model="gpt-4o-mini", max_tokens=300
):
    # Initialize the OpenAI client
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

    # Prepare the messages for the API request
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
                },
            ],
        }
    ]

    try:
        # Send the request to the ChatGPT API
        response = client.chat.completions.create(
            model=model, messages=messages, max_tokens=max_tokens
        )
        # Return the content of the response
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
# Alternative vision backend using Groq's LLaVA model; defined but not called
# by the gesture loop below, which queries ChatGPT instead.
def query_groq(prompt, base64_image):
    client = Groq()
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    },
                ],
            }
        ],
        model="llava-v1.5-7b-4096-preview",
    )
    response_content = chat_completion.choices[0].message.content
    try:
        response_json = json.loads(response_content)
    except json.JSONDecodeError as e:
        # Handle cases where the content is not valid JSON
        print(e)
        print(response_content)
        response_json = {"error": "Invalid JSON response"}
    return response_json
def gesture_loop():
    global latest_result_2
    cap = cv2.VideoCapture(0)  # Use default camera
    prompt = """Analyze the image and provide a JSON response with the following information:
1. Determine if the person in the image has their hands positioned together in a gesture resembling prayer. This includes cases where:
    - The hands are partially visible, possibly cut off by the edges of the image.
    - The hands are joined or touching in a prayer-like position, with palms or fingers pressed together.
2. Identify if there is a 'thumbs up' gesture visible in the image.
3. Detect if a closed fist is present in the image.
4. Recognize if there is a hand gesture resembling a stop sign (palm facing forward with fingers extended).
The analysis should consider various orientations and positions of the hands to accurately detect these gestures.
Return the results strictly in the following JSON format:
{
    "handsPrayer": true or false,
    "thumbsUp": true or false,
    "fist": true or false,
    "stopSign": true or false
}
Ensure the JSON string contains no additional text or deviations from this format. Also don't include backticks."""
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Encode the frame and query ChatGPT for the gesture flags
        base64_image = encode_image(frame)
        result = capture_and_query_chatgpt(prompt, base64_image)
        try:
            latest_result_2 = json.loads(result)
        except json.JSONDecodeError:
            print(f"Error parsing JSON: {result}")
            latest_result_2 = {
                "handsPrayer": False,
                "thumbsUp": False,
                "fist": False,
                "stopSign": False,
            }
        # print("GESTURES", latest_result_2)
    cap.release()
    cv2.destroyAllWindows()
@app.get("/gesture-recognition")
async def get_latest_result_2():
return latest_result_2 # Now returns the JSON object directly
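
# Example poll (hypothetical): curl http://localhost:8000/gesture-recognition
# -> {"handsPrayer": false, "thumbsUp": true, "fist": false, "stopSign": false}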
# Start the gesture-recognition loop on a background thread
camera_thread = threading.Thread(target=gesture_loop)
camera_thread.daemon = True  # Daemon thread: closes when the main program exits
camera_thread.start()


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

# To run the server, use: uvicorn main:app --reload
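# Note: both camera loops start at module import time, so any runner (the
# uvicorn CLI or the __main__ block above) opens the default webcam; only one
# server process should hold the camera at a time.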