Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Migration to Google Generative AI #6

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
migration to google-genai
  • Loading branch information
WilfredoAaronSosaRamosBoushtech committed Jul 9, 2024
commit 3ee56c64b6bd592e9ff99164b2b5446d18f46c68
4 changes: 3 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
streamlit
chromadb
langchain
langchain-google-vertexai
langchain-core
langchain_community
langchain-google-genai
pypdf
4 changes: 1 addition & 3 deletions tasks/task_10/task_10.py
Original file line number Diff line number Diff line change
@@ -12,9 +12,7 @@
if __name__ == "__main__":

embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR-PROJECT-ID-HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

# Add Session State
24 changes: 11 additions & 13 deletions tasks/task_4/task_4.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
# embedding_client.py

from langchain_google_vertexai import VertexAIEmbeddings
from langchain_google_genai import GoogleGenerativeAIEmbeddings

class EmbeddingClient:
"""
Task: Initialize the EmbeddingClient class to connect to Google Generative AI for text embeddings.

The EmbeddingClient class should be capable of initializing an embedding client with specific configurations
for model name, project, and location. Your task is to implement the __init__ method based on the provided
parameters. This setup will allow the class to utilize Google Cloud's VertexAIEmbeddings for processing text queries.
parameters. This setup will allow the class to utilize Google Cloud's GoogleGenerativeAIEmbeddings for processing text queries.

Steps:
1. Implement the __init__ method to accept the 'model_name' parameter.
This parameter is crucial for setting up the connection to the VertexAIEmbeddings service.
This parameter is crucial for setting up the connection to the GoogleGenerativeAIEmbeddings service.

2. Within the __init__ method, initialize the 'self.client' attribute as an instance of VertexAIEmbeddings
2. Within the __init__ method, initialize the 'self.client' attribute as an instance of GoogleGenerativeAIEmbeddings
using the provided parameters. This attribute will be used to embed queries.

Parameters:
@@ -23,17 +23,17 @@ class EmbeddingClient:
- location: The location of the Google Cloud project, such as 'us-central1'. (NOTE(review): this parameter appears unused after the migration to Google Generative AI — confirm and remove from the docstring.)

Instructions:
- Carefully initialize the 'self.client' with VertexAIEmbeddings in the __init__ method using the parameters.
- Carefully initialize the 'self.client' with GoogleGenerativeAIEmbeddings in the __init__ method using the parameters.
- Pay attention to how each parameter is used to configure the embedding client.

Note: The 'embed_query' method has been provided for you. Focus on correctly initializing the class.
"""

def __init__(self, model_name, project, location):
# Initialize the VertexAIEmbeddings client with the given parameters
# Read about the VertexAIEmbeddings wrapper from Langchain here
def __init__(self, model_name):
# Initialize the GoogleGenerativeAIEmbeddings client with the given parameters
# Read about the GoogleGenerativeAIEmbeddings wrapper from Langchain here
# https://python.langchain.com/docs/integrations/text_embedding/google_generative_ai
self.client = VertexAIEmbeddings(
self.client = GoogleGenerativeAIEmbeddings(
#### YOUR CODE HERE ####
)

@@ -61,11 +61,9 @@ def embed_documents(self, documents):
return None

if __name__ == "__main__":
model_name = "textembedding-gecko@003"
project = "YOUR PROJECT ID HERE"
location = "us-central1"
model_name = "models/embedding-001"

embedding_client = EmbeddingClient(model_name, project, location)
embedding_client = EmbeddingClient(model_name)
vectors = embedding_client.embed_query("Hello World!")
if vectors:
print(vectors)
4 changes: 1 addition & 3 deletions tasks/task_5/task_5.py
Original file line number Diff line number Diff line change
@@ -92,9 +92,7 @@ def query_chroma_collection(self, query) -> Document:
processor.ingest_documents()

embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR PROJECT ID HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

embed_client = EmbeddingClient(**embed_config) # Initialize from Task 4
4 changes: 1 addition & 3 deletions tasks/task_6/task_6.py
Original file line number Diff line number Diff line change
@@ -44,9 +44,7 @@

# Configuration for EmbeddingClient
embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR PROJECT ID HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

screen = st.empty() # Screen 1, ingest documents
10 changes: 4 additions & 6 deletions tasks/task_7/task_7.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import streamlit as st
from langchain_google_vertexai import VertexAI
from langchain_google_genai import GoogleGenerativeAI
from langchain_core.prompts import PromptTemplate
import os
import sys
@@ -65,12 +65,12 @@ def init_llm(self):
4. Initialize the LLM with the specified parameters to be ready for generating quiz questions.

Implementation:
- Use the VertexAI class to create an instance of the LLM with the specified configurations.
- Use the GoogleGenerativeAI class to create an instance of the LLM with the specified configurations.
- Assign the created LLM instance to the 'self.llm' attribute for later use in question generation.

Note: Ensure you have appropriate access or API keys if required by the model or platform.
"""
self.llm = VertexAI(
self.llm = GoogleGenerativeAI(
############# YOUR CODE HERE ############
)

@@ -140,9 +140,7 @@ def generate_question_with_vectorstore(self):


embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR-PROJECT-ID-HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

screen = st.empty()
10 changes: 4 additions & 6 deletions tasks/task_8/task_8.py
Original file line number Diff line number Diff line change
@@ -8,7 +8,7 @@
from tasks.task_5.task_5 import ChromaCollectionCreator

from langchain_core.prompts import PromptTemplate
from langchain_google_vertexai import VertexAI
from langchain_google_genai import GoogleGenerativeAI

class QuizGenerator:
def __init__(self, topic=None, num_questions=1, vectorstore=None):
@@ -66,8 +66,8 @@ def init_llm(self):

:return: An instance or configuration for the LLM.
"""
self.llm = VertexAI(
model_name = "gemini-pro",
self.llm = GoogleGenerativeAI(
model = "gemini-1.5-pro",
temperature = 0.8, # Increased for less deterministic questions
max_output_tokens = 500
)
@@ -177,9 +177,7 @@ def validate_question(self, question: dict) -> bool:
if __name__ == "__main__":

embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR-PROJECT-ID-HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

screen = st.empty()
4 changes: 1 addition & 3 deletions tasks/task_9/task_9.py
Original file line number Diff line number Diff line change
@@ -70,9 +70,7 @@ def next_question_index(self, direction=1):
if __name__ == "__main__":

embed_config = {
"model_name": "textembedding-gecko@003",
"project": "YOUR-PROJECT-ID-HERE",
"location": "us-central1"
"model_name": "models/embedding-001"
}

screen = st.empty()