diff --git a/requirements.txt b/requirements.txt index cf1edea..4e22728 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,4 +69,5 @@ scikit-optimize pinecone-client pystemmer langchain_groq -langchain-google-genai \ No newline at end of file +langchain-google-genai +langchain-google-vertexai \ No newline at end of file diff --git a/setup.py b/setup.py index 7ed0fd4..927b061 100644 --- a/setup.py +++ b/setup.py @@ -100,7 +100,8 @@ 'pinecone-client', 'setuptools', 'langchain_groq', - 'langchain-google-genai' + 'langchain-google-genai', + 'langchain-google-vertexai' # other dependencies ], ) diff --git a/src/ragbuilder/executor.py b/src/ragbuilder/executor.py index 10be2cf..388bd9b 100644 --- a/src/ragbuilder/executor.py +++ b/src/ragbuilder/executor.py @@ -68,6 +68,7 @@ from langchain_groq import ChatGroq from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI from langchain_google_genai import ChatGoogleGenerativeAI,GoogleGenerativeAIEmbeddings +from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings # import local modules from ragbuilder.langchain_module.retriever.retriever import * @@ -287,8 +288,8 @@ def __init__(self, val): logger.info("Creating RAG object from generated code...(this may take a while in some cases)") try: #execution os string + # logger.info(f"Generated Code\n{self.router}") exec(self.router,globals_dict,locals_dict) - logger.debug(f"Generated Code\n{self.router}") #old rag func hooked to eval self.rag = locals_dict['rag_pipeline']() diff --git a/src/ragbuilder/langchain_module/embedding_model/embedding.py b/src/ragbuilder/langchain_module/embedding_model/embedding.py index d47b6af..81d761f 100644 --- a/src/ragbuilder/langchain_module/embedding_model/embedding.py +++ b/src/ragbuilder/langchain_module/embedding_model/embedding.py @@ -15,6 +15,7 @@ def getEmbedding(**kwargs): raise KeyError("The key 'embedding_model' is missing from the arguments.") embedding_model = kwargs['embedding_model'] + model_owner= 
embedding_model.split(":")[0] model= embedding_model.split(":")[1] # Validate the embedding model type @@ -39,6 +40,10 @@ def getEmbedding(**kwargs): logger.info(f"Google Embedding Invoked: {embedding_model}") code_string= f"""embedding = GoogleGenerativeAIEmbeddings(model='{model}')""" import_string = f"""from langchain_google_genai import GoogleGenerativeAIEmbeddings""" + elif model_owner == "GoogleVertexAI": + logger.info(f"GoogleVertexAI Embedding Invoked: {embedding_model}") + code_string= f"""embedding = VertexAIEmbeddings(model_name='{model}')""" + import_string = f"""from langchain_google_vertexai import VertexAIEmbeddings""" elif model_owner == "Azure": logger.info(f"Azure Embedding Invoked: {embedding_model}") code_string= f"""embedding = AzureOpenAIEmbeddings(model='{model}')""" diff --git a/src/ragbuilder/langchain_module/llms/llmConfig.py b/src/ragbuilder/langchain_module/llms/llmConfig.py index 000d7e1..388bad5 100644 --- a/src/ragbuilder/langchain_module/llms/llmConfig.py +++ b/src/ragbuilder/langchain_module/llms/llmConfig.py @@ -23,6 +23,10 @@ def getLLM(**kwargs): logger.info(f"LLM Code Gen Invoked:Google") import_string = f"""from langchain_google_genai import ChatGoogleGenerativeAI""" code_string = f"""llm = ChatGoogleGenerativeAI(model='{model}')""" + elif model_owner == "GoogleVertexAI": + logger.info(f"LLM Code Gen Invoked:GoogleVertexAI") + import_string = f"""from langchain_google_vertexai import ChatVertexAI""" + code_string = f"""llm = ChatVertexAI(model_name='{model}')""" elif model_owner == "OpenAI": logger.info(f"LLM Code Gen Invoked: {retrieval_model}") import_string = f"""from langchain_openai import ChatOpenAI"""