More ruff enables (PYI, SIM, etc.) (run-llama#8038)
jamesbraza authored Oct 10, 2023
1 parent 0a6c63f commit 85de3d9
Showing 77 changed files with 251 additions and 326 deletions.
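
Almost every hunk in this commit applies the same mechanical fix: pydocstyle rule D415, enabled through ruff (the `# noqa: D415` added in bench_embeddings.py below confirms it is among the enabled rules), requires a docstring's first line to end with a period, question mark, or exclamation point. A minimal sketch of the pattern; the function names here are illustrative, not taken from the diff:

# Before: D415 flags this docstring because its first line
# lacks terminal punctuation.
def scale(x: float, factor: float) -> float:
    """Scale a value by a factor"""
    return x * factor

# After: the one-character fix applied across most files below.
def scale_fixed(x: float, factor: float) -> float:
    """Scale a value by a factor."""
    return x * factor

# Where a first line deliberately ends without punctuation, the commit
# suppresses the rule inline instead, as bench_embeddings.py does:
def windows(n: int) -> None:
    """Build overlapping slices:
    offset 0: [0:n], [n:2*n], ...
    """  # noqa: D415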
4 changes: 2 additions & 2 deletions benchmarks/agent/math_tasks.py
@@ -7,12 +7,12 @@


def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
"""Add two integers and returns the result integer."""
return a + b


def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
"""Multiple two integers and returns the result integer."""
return a * b


2 changes: 1 addition & 1 deletion benchmarks/embeddings/bench_embeddings.py
@@ -16,7 +16,7 @@ def generate_strings(num_strings: int = 100, string_length: int = 10) -> List[st
offset 0: [0:string_length], [string_length:2*string_length], ...
offset 1: [1:1+string_length], [1+string_length:1+2*string_length],...
...
"""
""" # noqa: D415
content = (
SimpleDirectoryReader("../../examples/paul_graham_essay/data")
.load_data()[0]
4 changes: 2 additions & 2 deletions experimental/cli/cli_add.py
@@ -7,7 +7,7 @@


def add_cli(args: Namespace) -> None:
"""Handle subcommand "add" """
"""Handle subcommand "add"."""
index = load_index()

for p in args.files:
@@ -26,7 +26,7 @@ def add_cli(args: Namespace) -> None:


def register_add_cli(subparsers: _SubParsersAction) -> None:
"""Register subcommand "add" to ArgumentParser"""
"""Register subcommand "add" to ArgumentParser."""
parser = subparsers.add_parser("add")
parser.add_argument(
"files",
4 changes: 2 additions & 2 deletions experimental/cli/cli_init.py
@@ -4,13 +4,13 @@


def init_cli(args: Namespace) -> None:
"""Handle subcommand "init" """
"""Handle subcommand "init"."""
config = load_config(args.directory)
save_config(config, args.directory)


def register_init_cli(subparsers: _SubParsersAction) -> None:
"""Register subcommand "init" to ArgumentParser"""
"""Register subcommand "init" to ArgumentParser."""
parser = subparsers.add_parser("init")
parser.add_argument(
"directory",
4 changes: 2 additions & 2 deletions experimental/cli/cli_query.py
@@ -4,14 +4,14 @@


def query_cli(args: Namespace) -> None:
"""Handle subcommand "query" """
"""Handle subcommand "query"."""
index = load_index()
query_engine = index.as_query_engine()
print(query_engine.query(args.query))


def register_query_cli(subparsers: _SubParsersAction) -> None:
"""Register subcommand "query" to ArgumentParser"""
"""Register subcommand "query" to ArgumentParser."""
parser = subparsers.add_parser("query")
parser.add_argument(
"query",
14 changes: 7 additions & 7 deletions experimental/cli/configuration.py
@@ -28,21 +28,21 @@


def load_config(root: str = ".") -> ConfigParser:
"""Load configuration from file"""
"""Load configuration from file."""
config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)
config.read(os.path.join(root, CONFIG_FILE_NAME))
return config


def save_config(config: ConfigParser, root: str = ".") -> None:
"""Load configuration to file"""
"""Load configuration to file."""
with open(os.path.join(root, CONFIG_FILE_NAME), "w") as fd:
config.write(fd)


def load_index(root: str = ".") -> BaseIndex[Any]:
"""Load existing index file"""
"""Load existing index file."""
config = load_config(root)
service_context = _load_service_context(config)

@@ -69,14 +69,14 @@ def load_index(root: str = ".") -> BaseIndex[Any]:


def save_index(index: BaseIndex[Any], root: str = ".") -> None:
"""Save index to file"""
"""Save index to file."""
config = load_config(root)
persist_dir = config["store"]["persist_dir"]
index.storage_context.persist(persist_dir=persist_dir)


def _load_service_context(config: ConfigParser) -> ServiceContext:
"""Internal function to load service context based on configuration"""
"""Internal function to load service context based on configuration."""
embed_model = _load_embed_model(config)
llm_predictor = _load_llm_predictor(config)
return ServiceContext.from_defaults(
@@ -90,7 +90,7 @@ def _load_storage_context(config: ConfigParser) -> StorageContext:


def _load_llm_predictor(config: ConfigParser) -> LLMPredictor:
"""Internal function to load LLM predictor based on configuration"""
"""Internal function to load LLM predictor based on configuration."""
model_type = config["llm_predictor"]["type"].lower()
if model_type == "default":
llm = _load_llm(config["llm_predictor"])
@@ -110,7 +110,7 @@ def _load_llm(section: SectionProxy) -> LLM:


def _load_embed_model(config: ConfigParser) -> BaseEmbedding:
"""Internal function to load embedding model based on configuration"""
"""Internal function to load embedding model based on configuration."""
model_type = config["embed_model"]["type"]
if model_type == "default":
return OpenAIEmbedding()
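
The helpers above layer configuration in two steps: ConfigParser reads the in-code defaults first, then lets an on-disk INI file override them. A runnable sketch of that layering; the DEFAULT_CONFIG dict here is an assumed stand-in, not the experimental CLI's real defaults:

from configparser import ConfigParser

# Assumed stand-in for the CLI's DEFAULT_CONFIG.
DEFAULT_CONFIG = {"store": {"persist_dir": "./storage"}}

config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)  # defaults first...
config.read("config.ini")  # ...then the INI file, if present, overrides them
print(config["store"]["persist_dir"])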
2 changes: 1 addition & 1 deletion experimental/colbert_index/base.py
@@ -145,7 +145,7 @@ def query(self, query_str: str, top_k: int = 10) -> List[NodeWithScore]:
"""
doc_ids, _, scores = self.store.search(text=query_str, k=top_k)

node_doc_ids = list(map(lambda id: self._docs_pos_to_node_id[id], doc_ids))
node_doc_ids = [self._docs_pos_to_node_id[id] for id in doc_ids]
nodes = self.docstore.get_nodes(node_doc_ids)

nodes_with_score = []
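
The hunk above is one of the few non-docstring changes: a `list(map(lambda ...))` call becomes a list comprehension, the kind of rewrite covered by simplification rules such as C417 (unnecessary-map); the exact rule code isn't visible in the diff. A runnable sketch, with a hypothetical mapping standing in for the index's `_docs_pos_to_node_id`:

# Hypothetical stand-ins for ColbertIndex internals.
docs_pos_to_node_id = {0: "node-a", 1: "node-b", 2: "node-c"}
doc_ids = [2, 0]

# Before: map() over a lambda (which also shadows the builtin `id`).
node_doc_ids_old = list(map(lambda id: docs_pos_to_node_id[id], doc_ids))

# After: the equivalent list comprehension.
node_doc_ids_new = [docs_pos_to_node_id[id] for id in doc_ids]

assert node_doc_ids_old == node_doc_ids_new == ["node-c", "node-a"]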
2 changes: 1 addition & 1 deletion llama_index/agent/react/formatter.py
@@ -11,7 +11,7 @@


def get_react_tool_descriptions(tools: Sequence[BaseTool]) -> List[str]:
"""Tool"""
"""Tool."""
tool_descs = []
for tool in tools:
tool_desc = (
2 changes: 1 addition & 1 deletion llama_index/callbacks/llama_debug.py
@@ -116,7 +116,7 @@ def _get_event_pairs(self, events: List[CBEvent]) -> List[List[CBEvent]]:
def _get_time_stats_from_event_pairs(
self, event_pairs: List[List[CBEvent]]
) -> EventStats:
"""Calculate time-based stats for a set of event pairs"""
"""Calculate time-based stats for a set of event pairs."""
total_secs = 0.0
for event_pair in event_pairs:
start_time = datetime.strptime(event_pair[0].time, TIMESTAMP_FORMAT)
2 changes: 1 addition & 1 deletion llama_index/callbacks/open_inference_callback.py
@@ -112,7 +112,7 @@ def as_dataframe(data: Iterable[BaseDataType]) -> "DataFrame":

@dataclass
class TraceData:
"""Trace data"""
"""Trace data."""

query_data: QueryData = field(default_factory=QueryData)
node_datas: List[NodeData] = field(default_factory=list)
2 changes: 1 addition & 1 deletion llama_index/callbacks/simple_llm_handler.py
@@ -5,7 +5,7 @@


class SimpleLLMHandler(BaseCallbackHandler):
"""Callback handler for printing llms inputs/outputs"""
"""Callback handler for printing llms inputs/outputs."""

def __init__(self) -> None:
super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
2 changes: 1 addition & 1 deletion llama_index/chat_engine/context.py
@@ -127,7 +127,7 @@ async def _agenerate_context(self, message: str) -> Tuple[str, List[NodeWithScor
return self._context_template.format(context_str=context_str), nodes

def _get_prefix_messages_with_context(self, context_str: str) -> List[ChatMessage]:
"""Get the prefix messages with context"""
"""Get the prefix messages with context."""
# ensure we grab the user-configured system prompt
system_prompt = ""
prefix_messages = self._prefix_messages
4 changes: 2 additions & 2 deletions llama_index/chat_engine/types.py
@@ -18,12 +18,12 @@


def is_function(message: ChatMessage) -> bool:
"""Utility for ChatMessage responses from OpenAI models"""
"""Utility for ChatMessage responses from OpenAI models."""
return "function_call" in message.additional_kwargs


class ChatResponseMode(str, Enum):
"""Flag toggling waiting/streaming in `Agent._chat`"""
"""Flag toggling waiting/streaming in `Agent._chat`."""

WAIT = "wait"
STREAM = "stream"
2 changes: 1 addition & 1 deletion llama_index/embeddings/adapter.py
@@ -47,7 +47,7 @@ def __init__(
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""Init params"""
"""Init params."""
import torch

from llama_index.embeddings.adapter_utils import BaseAdapter, LinearLayer
2 changes: 1 addition & 1 deletion llama_index/evaluation/benchmarks/hotpotqa.py
@@ -22,7 +22,7 @@

class HotpotQAEvaluator:
"""
Refer to https://hotpotqa.github.io/ for more details on the dataset
Refer to https://hotpotqa.github.io/ for more details on the dataset.
"""

def _download_datasets(self) -> Dict[str, str]:
2 changes: 1 addition & 1 deletion llama_index/evaluation/dataset_generation.py
@@ -1,4 +1,4 @@
"""Dataset generation from documents"""
"""Dataset generation from documents."""
from __future__ import annotations

import asyncio
2 changes: 1 addition & 1 deletion llama_index/indices/base_retriever.py
@@ -47,7 +47,7 @@ async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
def get_service_context(self) -> Optional[ServiceContext]:
"""Attempts to resolve a service context.
Short-circuits at self.service_context, self._service_context,
or self._index.service_context
or self._index.service_context.
"""
if hasattr(self, "service_context"):
return self.service_context
2 changes: 1 addition & 1 deletion llama_index/indices/loading.py
@@ -50,7 +50,7 @@ def load_indices_from_storage(
index_ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[BaseIndex]:
"""Load multiple indices from storage context
"""Load multiple indices from storage context.
Args:
storage_context (StorageContext): storage context containing
2 changes: 1 addition & 1 deletion llama_index/indices/managed/base.py
@@ -20,7 +20,7 @@ class BaseManagedIndex(BaseIndex[IndexDict], ABC):
The managed service can index documents into a managed service.
How documents are structured into nodes is a detail for the managed service,
and not exposed in this interface (although could be controlled by
configuration parameters)
configuration parameters).
Args:
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
2 changes: 1 addition & 1 deletion llama_index/indices/managed/vectara/base.py
@@ -203,7 +203,7 @@ def insert_file(
) -> Optional[str]:
"""Vectara provides a way to add files (binary or text) directly via our API
where pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in Llama_index
This method provides a way to use that API in Llama_index.
# ruff: noqa: E501
Full API Docs: https://docs.vectara.com/docs/api-reference/indexing-apis/
2 changes: 1 addition & 1 deletion llama_index/indices/managed/vectara/retriever.py
@@ -1,5 +1,5 @@
"""Vectara index.
An index that is built on top of Vectara
An index that is built on top of Vectara.
"""

import json
2 changes: 1 addition & 1 deletion llama_index/indices/postprocessor/node.py
@@ -362,7 +362,7 @@ class LongContextReorder(BaseNodePostprocessor):
performance typically arises when crucial data is positioned
at the start or conclusion of the input context. Additionally,
as the input context lengthens, performance drops notably, even
in models designed for long contexts."
in models designed for long contexts.".
"""

@classmethod
4 changes: 2 additions & 2 deletions llama_index/llm_predictor/base.py
@@ -263,7 +263,7 @@ def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt"""
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
@@ -276,7 +276,7 @@ def _extend_prompt(
return extended_prompt

def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list"""
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
2 changes: 1 addition & 1 deletion llama_index/llms/azure_openai.py
@@ -10,7 +10,7 @@

class AzureOpenAI(OpenAI):
"""
Azure OpenAI
Azure OpenAI.
To use this, you must first deploy a model on Azure OpenAI.
Unlike OpenAI, you need to specify an `engine` parameter to identify
4 changes: 2 additions & 2 deletions llama_index/llms/portkey.py
@@ -1,5 +1,5 @@
"""
Portkey integration with Llama_index for enhanced monitoring
Portkey integration with Llama_index for enhanced monitoring.
"""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union, cast

@@ -38,7 +38,7 @@


class Portkey(CustomLLM):
"""_summary_
"""_summary_.
Args:
LLM (_type_): _description_
2 changes: 1 addition & 1 deletion llama_index/llms/portkey_utils.py
@@ -1,5 +1,5 @@
"""
Utility Tools for the Portkey Class
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
2 changes: 1 addition & 1 deletion llama_index/llms/predibase.py
@@ -14,7 +14,7 @@


class PredibaseLLM(CustomLLM):
"""Predibase LLM"""
"""Predibase LLM."""

model_name: str = Field(description="The Predibase model to use.")
predibase_api_key: str = Field(description="The Predibase API key to use.")
3 changes: 2 additions & 1 deletion llama_index/node_parser/extractors/metadata_extractors.py
@@ -403,7 +403,8 @@ class SummaryExtractor(MetadataFeatureExtractor):
"""
Summary extractor. Node-level extractor with adjacent sharing.
Extracts `section_summary`, `prev_section_summary`, `next_section_summary`
metadata fields
metadata fields.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLM predictor
summaries (List[str]): list of summaries to extract: 'self', 'prev', 'next'
4 changes: 2 additions & 2 deletions llama_index/node_parser/file/html.py
@@ -88,7 +88,7 @@ def get_nodes_from_documents(
return all_nodes

def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document"""
"""Get nodes from document."""
try:
from bs4 import BeautifulSoup
except ImportError:
@@ -144,7 +144,7 @@ def _build_node_from_split(
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split"""
"""Build node from single text split."""
node = build_nodes_from_splits(
[text_split], node, self.include_metadata, self.include_prev_next_rel
)[0]
4 changes: 2 additions & 2 deletions llama_index/node_parser/file/json.py
@@ -80,7 +80,7 @@ def get_nodes_from_documents(
return all_nodes

def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document"""
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
@@ -132,7 +132,7 @@ def _build_node_from_split(
node: BaseNode,
metadata: dict,
) -> TextNode:
"""Build node from single text split"""
"""Build node from single text split."""
node = build_nodes_from_splits(
[text_split], node, self.include_metadata, self.include_prev_next_rel
)[0]