Skip to content

Commit

Permalink
nbqa added to pre-commit, added black and ruff for notebooks (microso…
Browse files Browse the repository at this point in the history
…ft#1171)

* nbqa added to pre-commit, added black and ruff for notebooks

* polishing

* polishing

* polishing
  • Loading branch information
davorrunje authored Jan 8, 2024
1 parent 643a031 commit 1c4ae3d
Show file tree
Hide file tree
Showing 47 changed files with 2,235 additions and 2,265 deletions.
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,9 @@ repos:
website/yarn.lock |
notebook/.*
)$
- repo: https://github.com/nbQA-dev/nbQA
rev: 1.7.1
hooks:
- id: nbqa-ruff
args: ["--fix"]
- id: nbqa-black
791 changes: 397 additions & 394 deletions notebook/Async_human_input.ipynb

Large diffs are not rendered by default.

18 changes: 10 additions & 8 deletions notebook/agentchat_MathChat.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,10 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"import autogen\n",
"from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent\n",
"\n",
"config_list = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
Expand All @@ -70,7 +73,7 @@
" \"gpt-35-turbo-v0301\",\n",
" \"gpt\",\n",
" }\n",
" }\n",
" },\n",
")"
]
},
Expand Down Expand Up @@ -126,23 +129,21 @@
"metadata": {},
"outputs": [],
"source": [
"from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent\n",
"\n",
"# 1. create an AssistantAgent instance named \"assistant\"\n",
"assistant = autogen.AssistantAgent(\n",
" name=\"assistant\", \n",
" name=\"assistant\",\n",
" system_message=\"You are a helpful assistant.\",\n",
" llm_config={\n",
" \"timeout\": 600,\n",
" \"seed\": 42,\n",
" \"config_list\": config_list,\n",
" }\n",
" },\n",
")\n",
"\n",
"# 2. create the MathUserProxyAgent instance named \"mathproxyagent\"\n",
"# By default, the human_input_mode is \"NEVER\", which means the agent will not ask for human input.\n",
"mathproxyagent = MathUserProxyAgent(\n",
" name=\"mathproxyagent\", \n",
" name=\"mathproxyagent\",\n",
" human_input_mode=\"NEVER\",\n",
" code_execution_config={\"use_docker\": False},\n",
")"
Expand Down Expand Up @@ -176,7 +177,9 @@
"# given a math problem, we use the mathproxyagent to generate a prompt to be sent to the assistant as the initial message.\n",
"# the assistant receives the message and generates a response. The response will be sent back to the mathproxyagent for processing.\n",
"# The conversation continues until the termination condition is met, in MathChat, the termination condition is the detect of \"\\boxed{}\" in the response.\n",
"math_problem = \"Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\"\n",
"math_problem = (\n",
" \"Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\"\n",
")\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem)"
]
},
Expand Down Expand Up @@ -291,7 +294,6 @@
"outputs": [],
"source": [
"# The wolfram alpha app id is required for this example (the assistant may choose to query Wolfram Alpha).\n",
"import os\n",
"if \"WOLFRAM_ALPHA_APPID\" not in os.environ:\n",
" os.environ[\"WOLFRAM_ALPHA_APPID\"] = open(\"wolfram.txt\").read().strip()\n",
"\n",
Expand Down
38 changes: 19 additions & 19 deletions notebook/agentchat_RetrieveChat.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,18 @@
}
],
"source": [
"import json\n",
"import os\n",
"\n",
"import chromadb\n",
"\n",
"import autogen\n",
"from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent\n",
"from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n",
"\n",
"# Accepted file formats for that can be stored in\n",
"# a vector database instance\n",
"from autogen.retrieve_utils import TEXT_FORMATS\n",
"\n",
"config_list = autogen.config_list_from_json(\n",
" env_or_file=\"OAI_CONFIG_LIST\",\n",
Expand Down Expand Up @@ -161,10 +172,6 @@
}
],
"source": [
"# Accepted file formats for that can be stored in \n",
"# a vector database instance\n",
"from autogen.retrieve_utils import TEXT_FORMATS\n",
"\n",
"print(\"Accepted file formats for `docs_path`:\")\n",
"print(TEXT_FORMATS)"
]
Expand All @@ -175,11 +182,6 @@
"metadata": {},
"outputs": [],
"source": [
"from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent\n",
"from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n",
"import chromadb\n",
"import os\n",
"\n",
"# 1. create an RetrieveAssistantAgent instance named \"assistant\"\n",
"assistant = RetrieveAssistantAgent(\n",
" name=\"assistant\",\n",
Expand Down Expand Up @@ -210,7 +212,7 @@
" \"docs_path\": [\n",
" \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md\",\n",
" \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md\",\n",
" os.path.join(os.path.abspath(''), \"..\", \"website\", \"docs\"),\n",
" os.path.join(os.path.abspath(\"\"), \"..\", \"website\", \"docs\"),\n",
" ],\n",
" \"custom_text_types\": [\"mdx\"],\n",
" \"chunk_token_size\": 2000,\n",
Expand All @@ -219,7 +221,7 @@
" \"embedding_model\": \"all-mpnet-base-v2\",\n",
" \"get_or_create\": True, # set to False if you don't want to reuse an existing collection, but you'll need to remove the collection manually\n",
" },\n",
" code_execution_config=False, # set to False if you don't want to execute the code\n",
" code_execution_config=False, # set to False if you don't want to execute the code\n",
")"
]
},
Expand Down Expand Up @@ -525,7 +527,9 @@
"# The conversation continues until the termination condition is met, in RetrieveChat, the termination condition when no human-in-loop is no code block detected.\n",
"# With human-in-loop, the conversation will continue until the user says \"exit\".\n",
"code_problem = \"How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\"\n",
"ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string=\"spark\") # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain \"spark\"."
"ragproxyagent.initiate_chat(\n",
" assistant, problem=code_problem, search_string=\"spark\"\n",
") # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain \"spark\"."
]
},
{
Expand Down Expand Up @@ -2122,8 +2126,6 @@
}
],
"source": [
"import json\n",
"\n",
"# queries_file = \"https://huggingface.co/datasets/thinkall/NaturalQuestionsQA/resolve/main/queries.jsonl\"\n",
"queries = \"\"\"{\"_id\": \"ce2342e1feb4e119cb273c05356b33309d38fa132a1cbeac2368a337e38419b8\", \"text\": \"what is non controlling interest on balance sheet\", \"metadata\": {\"answer\": [\"the portion of a subsidiary corporation 's stock that is not owned by the parent corporation\"]}}\n",
"{\"_id\": \"3a10ff0e520530c0aa33b2c7e8d989d78a8cd5d699201fc4b13d3845010994ee\", \"text\": \"how many episodes are in chicago fire season 4\", \"metadata\": {\"answer\": [\"23\"]}}\n",
Expand Down Expand Up @@ -2645,7 +2647,7 @@
"\n",
" # reset the assistant. Always reset the assistant before starting a new conversation.\n",
" assistant.reset()\n",
" \n",
"\n",
" qa_problem = questions[i]\n",
" ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=30)"
]
Expand Down Expand Up @@ -2724,7 +2726,7 @@
"metadata": {},
"outputs": [],
"source": [
"#create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n",
"# create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n",
"corpus_file = \"https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/corpus.txt\"\n",
"\n",
"# Create a new collection for NaturalQuestions dataset\n",
Expand Down Expand Up @@ -2762,8 +2764,6 @@
}
],
"source": [
"import json\n",
"\n",
"# queries_file = \"https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/queries.jsonl\"\n",
"queries = \"\"\"{\"_id\": \"61a46987092f11ebbdaeac1f6bf848b6\", \"text\": \"Which film came out first, Blind Shaft or The Mask Of Fu Manchu?\", \"metadata\": {\"answer\": [\"The Mask Of Fu Manchu\"]}}\n",
"{\"_id\": \"a7b9672009c311ebbdb0ac1f6bf848b6\", \"text\": \"Are North Marion High School (Oregon) and Seoul High School both located in the same country?\", \"metadata\": {\"answer\": [\"no\"]}}\n",
Expand Down Expand Up @@ -3075,7 +3075,7 @@
"\n",
" # reset the assistant. Always reset the assistant before starting a new conversation.\n",
" assistant.reset()\n",
" \n",
"\n",
" qa_problem = questions[i]\n",
" ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=10)"
]
Expand Down
Loading

0 comments on commit 1c4ae3d

Please sign in to comment.