infra: add -p to mkdir in lint steps (langchain-ai#17013)
Previously, if the lint step did not find an existing mypy cache directory, mypy would not run at all.

This change makes mypy run every time, by creating the cache directory up front (`mkdir -p`) and chaining the mypy invocation with `&&` instead of `||`.

It also adds `# type: ignore` comments for existing, previously uncaught issues to unblock other PRs.

---------

Co-authored-by: Erick Friis <[email protected]>
Co-authored-by: Bagatur <[email protected]>
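
For context on the Makefile change, here is a minimal shell sketch of what switching the final `||` to `&&` does. The variable values, the demo cache path, and the echo placeholders are illustrative stand-ins, not the real Makefile recipe:

#!/bin/sh
# Illustrative stand-ins for the Makefile's $(PYTHON_FILES) and $(MYPY_CACHE).
PYTHON_FILES="example.py"
MYPY_CACHE=".mypy_cache_demo"

# Old chaining: `test || mkdir || mypy` only reaches the last command when
# mkdir FAILS, so whenever the cache directory is created successfully the
# type check is silently skipped.
[ "$PYTHON_FILES" = "" ] || mkdir -p "$MYPY_CACHE" || echo "old form: reached only if mkdir fails"

# New chaining: `(test || mkdir -p) && mypy` evaluates left to right; the left
# side effectively always succeeds because mkdir -p is idempotent, so the
# type check now runs on every invocation.
[ "$PYTHON_FILES" = "" ] || mkdir -p "$MYPY_CACHE" && echo "new form: reached whenever the left side succeeds"

rm -r "$MYPY_CACHE"

The libs/community/Makefile hunk below shows the same operator change in place.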
3 people authored Feb 5, 2024
1 parent db6af21 commit 4eda647
Showing 103 changed files with 378 additions and 369 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/_lint.yml
@@ -86,7 +86,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}


- name: Analysing the code with our lint
@@ -105,7 +105,7 @@ jobs:
# It doesn't matter how you change it, any change will cause a cache-bust.
working-directory: ${{ inputs.working-directory }}
run: |
poetry install --with test,test_integration
poetry install --with test
- name: Get .mypy_cache_test to speed up mypy
uses: actions/cache@v3
@@ -114,7 +114,7 @@
with:
path: |
${{ env.WORKDIR }}/.mypy_cache_test
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}

- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}
2 changes: 1 addition & 1 deletion libs/community/Makefile
@@ -41,7 +41,7 @@ lint lint_diff lint_package lint_tests:
poetry run ruff .
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || poetry run ruff --select I $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) || poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)

format format_diff:
poetry run ruff format $(PYTHON_FILES)
@@ -84,7 +84,7 @@ def _run(self, text: str) -> str:
raise e
data_params = data.get("params")
response = self.requests_wrapper.get(data["url"], params=data_params)
response = response[: self.response_length]
response = response[: self.response_length] # type: ignore[index]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
@@ -115,7 +115,7 @@ def _run(self, text: str) -> str:
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data["url"], data["data"])
response = response[: self.response_length]
response = response[: self.response_length] # type: ignore[index]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
@@ -146,7 +146,7 @@ def _run(self, text: str) -> str:
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.patch(data["url"], data["data"])
response = response[: self.response_length]
response = response[: self.response_length] # type: ignore[index]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
@@ -177,7 +177,7 @@ def _run(self, text: str) -> str:
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.put(data["url"], data["data"])
response = response[: self.response_length]
response = response[: self.response_length] # type: ignore[index]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
@@ -209,7 +209,7 @@ def _run(self, text: str) -> str:
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.delete(data["url"])
response = response[: self.response_length]
response = response[: self.response_length] # type: ignore[index]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
8 changes: 4 additions & 4 deletions libs/community/langchain_community/agent_toolkits/sql/base.py
@@ -177,12 +177,12 @@ def create_sql_agent(
elif agent_type == AgentType.OPENAI_FUNCTIONS:
if prompt is None:
messages = [
SystemMessage(content=prefix),
SystemMessage(content=prefix), # type: ignore[arg-type]
HumanMessagePromptTemplate.from_template("{input}"),
AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(messages)
prompt = ChatPromptTemplate.from_messages(messages) # type: ignore[arg-type]
agent = RunnableAgent(
runnable=create_openai_functions_agent(llm, tools, prompt),
input_keys_arg=["input"],
@@ -191,12 +191,12 @@
elif agent_type == "openai-tools":
if prompt is None:
messages = [
SystemMessage(content=prefix),
SystemMessage(content=prefix), # type: ignore[arg-type]
HumanMessagePromptTemplate.from_template("{input}"),
AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(messages)
prompt = ChatPromptTemplate.from_messages(messages) # type: ignore[arg-type]
agent = RunnableMultiActionAgent(
runnable=create_openai_tools_agent(llm, tools, prompt),
input_keys_arg=["input"],
@@ -723,7 +723,7 @@ def _create_session_analysis_df(self) -> Any:
)
return session_analysis_df

def _contain_llm_records(self):
def _contain_llm_records(self): # type: ignore[no-untyped-def]
return bool(self.records["on_llm_start_records"])

def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None:
@@ -47,7 +47,7 @@ def __init__(
):
self.index: str = index
self.session_id: str = session_id
self.ensure_ascii: bool = esnsure_ascii
self.ensure_ascii: bool = esnsure_ascii # type: ignore[assignment]

# Initialize Elasticsearch client from passed client arg or connection info
if es_connection is not None:
@@ -40,7 +40,7 @@ def __init__(
self.session_id = session_id
self.table_name = table_name
self.earliest_time = earliest_time
self.cache = []
self.cache = [] # type: ignore[var-annotated]

# Set up SQLAlchemy engine and session
self.engine = create_engine(connection_string)
@@ -102,7 +102,7 @@ def _load_messages_to_cache(self) -> None:
logger.error(f"Error loading messages to cache: {e}")

@property
def messages(self) -> List[BaseMessage]:
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""returns all messages"""
if len(self.cache) == 0:
self.reload_cache()
@@ -149,7 +149,7 @@ def _get_memory(self) -> Optional[Memory]:
return None
return zep_memory

def add_user_message(
def add_user_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding a human message string to the store.
@@ -160,7 +160,7 @@ def add_user_message(
"""
self.add_message(HumanMessage(content=message), metadata=metadata)

def add_ai_message(
def add_ai_message( # type: ignore[override]
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding an AI message string to the store.
@@ -20,7 +20,7 @@


class LlamaContentFormatter(ContentFormatterBase):
def __init__(self):
def __init__(self): # type: ignore[no-untyped-def]
raise TypeError(
"`LlamaContentFormatter` is deprecated for chat models. Use "
"`LlamaChatContentFormatter` instead."
@@ -72,7 +72,7 @@ def _convert_message_to_dict(message: BaseMessage) -> Dict:
def supported_api_types(self) -> List[AzureMLEndpointApiType]:
return [AzureMLEndpointApiType.realtime, AzureMLEndpointApiType.serverless]

def format_request_payload(
def format_request_payload( # type: ignore[override]
self,
messages: List[BaseMessage],
model_kwargs: Dict,
@@ -98,17 +98,17 @@ def format_request_payload(
raise ValueError(
f"`api_type` {api_type} is not supported by this formatter"
)
return str.encode(request_payload)
return str.encode(request_payload) # type: ignore[return-value]

def format_response_payload(
def format_response_payload( # type: ignore[override]
self, output: bytes, api_type: AzureMLEndpointApiType
) -> ChatGeneration:
"""Formats response"""
if api_type == AzureMLEndpointApiType.realtime:
try:
choice = json.loads(output)["output"]
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return ChatGeneration(
message=BaseMessage(
content=choice.strip(),
@@ -125,7 +125,7 @@ def format_response_payload(
"model. Expected `dict` but `{type(choice)}` was received."
)
except (KeyError, IndexError, TypeError) as e:
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e
raise ValueError(self.format_error_msg.format(api_type=api_type)) from e # type: ignore[union-attr]
return ChatGeneration(
message=BaseMessage(
content=choice["message"]["content"].strip(),
8 changes: 4 additions & 4 deletions libs/community/langchain_community/chat_models/edenai.py
@@ -175,7 +175,7 @@ def _stream(
"""Call out to EdenAI's chat endpoint."""
url = f"{self.edenai_api_url}/text/chat/stream"
headers = {
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}", # type: ignore[union-attr]
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
@@ -216,7 +216,7 @@ async def _astream(
) -> AsyncIterator[ChatGenerationChunk]:
url = f"{self.edenai_api_url}/text/chat/stream"
headers = {
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}", # type: ignore[union-attr]
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
@@ -265,7 +265,7 @@ def _generate(

url = f"{self.edenai_api_url}/text/chat"
headers = {
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}", # type: ignore[union-attr]
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
@@ -323,7 +323,7 @@ async def _agenerate(

url = f"{self.edenai_api_url}/text/chat"
headers = {
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",
"Authorization": f"Bearer {self.edenai_api_key.get_secret_value()}", # type: ignore[union-attr]
"User-Agent": self.get_user_agent(),
}
formatted_data = _format_edenai_messages(messages=messages)
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/ernie.py
@@ -214,7 +214,7 @@ def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = [
ChatGeneration(
message=AIMessage(
content=response.get("result"),
content=response.get("result"), # type: ignore[arg-type]
additional_kwargs={**additional_kwargs},
)
)
10 changes: 5 additions & 5 deletions libs/community/langchain_community/chat_models/gpt_router.py
@@ -56,7 +56,7 @@ class GPTRouterModel(BaseModel):
provider_name: str


def get_ordered_generation_requests(
def get_ordered_generation_requests( # type: ignore[no-untyped-def, no-untyped-def]
models_priority_list: List[GPTRouterModel], **kwargs
):
"""
@@ -100,7 +100,7 @@ def completion_with_retry(
models_priority_list: List[GPTRouterModel],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, Generator[ChunkedGenerationResponse]]:
) -> Union[GenerationResponse, Generator[ChunkedGenerationResponse]]: # type: ignore[type-arg]
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

@@ -122,7 +122,7 @@ async def acompletion_with_retry(
models_priority_list: List[GPTRouterModel],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Union[GenerationResponse, AsyncGenerator[ChunkedGenerationResponse]]:
) -> Union[GenerationResponse, AsyncGenerator[ChunkedGenerationResponse]]: # type: ignore[type-arg]
"""Use tenacity to retry the async completion call."""

retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@@ -282,7 +282,7 @@ async def _agenerate(
)
return self._create_chat_result(response)

def _create_chat_generation_chunk(
def _create_chat_generation_chunk( # type: ignore[no-untyped-def, no-untyped-def]
self, data: Mapping[str, Any], default_chunk_class
):
chunk = _convert_delta_to_message_chunk(
@@ -293,7 +293,7 @@ def _create_chat_generation_chunk(
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info) # type: ignore[assignment]
return chunk, default_chunk_class

def _stream(
@@ -144,7 +144,7 @@ def _resolve_model_id(self) -> None:

elif isinstance(self.llm, HuggingFaceHub):
# no need to look up model_id for HuggingFaceHub LLM
self.model_id = self.llm.repo_id
self.model_id = self.llm.repo_id # type: ignore[assignment]
return

else:
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/konko.py
@@ -169,7 +169,7 @@ def get_available_models(
}

if openai_api_key:
headers["X-OpenAI-Api-Key"] = openai_api_key.get_secret_value()
headers["X-OpenAI-Api-Key"] = openai_api_key.get_secret_value() # type: ignore[union-attr]

models_response = requests.get(models_url, headers=headers)

22 changes: 11 additions & 11 deletions libs/community/langchain_community/chat_models/ollama.py
@@ -74,10 +74,10 @@ def _format_message_as_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
if message.content[0].get("type") == "text":
message_text = f"[INST] {message.content[0]['text']} [/INST]"
elif message.content[0].get("type") == "image_url":
message_text = message.content[0]["image_url"]["url"]
if message.content[0].get("type") == "text": # type: ignore[union-attr]
message_text = f"[INST] {message.content[0]['text']} [/INST]" # type: ignore[index]
elif message.content[0].get("type") == "image_url": # type: ignore[union-attr]
message_text = message.content[0]["image_url"]["url"] # type: ignore[index, index]
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
@@ -112,11 +112,11 @@ def _convert_messages_to_ollama_messages(
content = message.content
else:
for content_part in message.content:
if content_part.get("type") == "text":
content += f"\n{content_part['text']}"
elif content_part.get("type") == "image_url":
if isinstance(content_part.get("image_url"), str):
image_url_components = content_part["image_url"].split(",")
if content_part.get("type") == "text": # type: ignore[union-attr]
content += f"\n{content_part['text']}" # type: ignore[index]
elif content_part.get("type") == "image_url": # type: ignore[union-attr]
if isinstance(content_part.get("image_url"), str): # type: ignore[union-attr]
image_url_components = content_part["image_url"].split(",") # type: ignore[index]
# Support data:image/jpeg;base64,<image> format
# and base64 strings
if len(image_url_components) > 1:
@@ -142,7 +142,7 @@
}
)

return ollama_messages
return ollama_messages # type: ignore[return-value]

def _create_chat_stream(
self,
@@ -337,7 +337,7 @@ async def _astream(
verbose=self.verbose,
)
except OllamaEndpointNotFoundError:
async for chunk in self._legacy_astream(messages, stop, **kwargs):
async for chunk in self._legacy_astream(messages, stop, **kwargs): # type: ignore[attr-defined]
yield chunk

@deprecated("0.0.3", alternative="_stream")
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/tongyi.py
@@ -197,7 +197,7 @@ def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model_name,
"top_p": self.top_p,
"api_key": self.dashscope_api_key.get_secret_value(),
"api_key": self.dashscope_api_key.get_secret_value(), # type: ignore[union-attr]
"result_format": "message",
**self.model_kwargs,
}
2 changes: 1 addition & 1 deletion libs/community/langchain_community/chat_models/vertexai.py
@@ -121,7 +121,7 @@ def _convert_to_prompt(part: Union[str, Dict]) -> Part:
elif path.startswith("data:image/"):
# extract base64 component from image uri
try:
encoded = re.search(r"data:image/\w{2,4};base64,(.*)", path).group(
encoded = re.search(r"data:image/\w{2,4};base64,(.*)", path).group( # type: ignore[union-attr]
1
)
except AttributeError:
