Merge branch 'main' into bugfix/function-calling-get-time-tool
Showing 2 changed files with 208 additions and 0 deletions.
207 changes: 207 additions & 0 deletions
integrations/haystack/haystack_sambanova_examples.ipynb
@@ -0,0 +1,207 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example question answering on a web page"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install haystack-ai==2.8.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from haystack import Pipeline\n",
    "from haystack.utils import Secret\n",
    "from haystack.components.fetchers import LinkContentFetcher\n",
    "from haystack.components.converters import HTMLToDocument\n",
    "from haystack.components.builders import PromptBuilder\n",
    "from haystack.components.generators import OpenAIGenerator\n",
    "import getpass\n",
    "import os\n",
    "\n",
    "if \"SAMBANOVA_API_KEY\" not in os.environ:\n",
" os.environ[\"SAMBANOVA_API_KEY\"] = getpass.getpass(\"insert your SambaNovaCloud API Key\")" | ||
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "To make a request using the SambaNova API, you have two options:\n",
      "\n",
      "**Option 1: Using a CURL command**\n",
      "\n",
      "1. Export your API key and URL in a terminal.\n",
      "2. Run the CURL command with the following parameters:\n",
      "\t* Authorization: Bearer $API_KEY\n",
      "\t* Content-Type: application/json\n",
      "\t* Data: a JSON object containing the model ID, messages, stop sequence, and other options.\n",
      "3. Send the request to the API using the POST method.\n",
      "\n",
      "**Option 2: Using the OpenAI client library in Python**\n",
      "\n",
      "1. Install the OpenAI library using pip.\n",
      "2. Create a Python file and import the OpenAI library.\n",
      "3. Initialize the OpenAI client with your API key and base URL.\n",
      "4. Use the `chat.completions.create` method to make an inference request, passing in the model ID, messages, and other options.\n",
      "5. Run the Python file to send the request and print the response.\n",
      "\n",
      "In both cases, you need to replace `<YOUR API KEY>` with your actual API key and use the correct model ID, such as \"Meta-Llama-3.1-405B-Instruct\".\n"
     ]
    }
   ],
   "source": [
    "fetcher = LinkContentFetcher()\n",
    "converter = HTMLToDocument()\n",
    "prompt_template = \"\"\"\n",
    "According to the contents of this website:\n",
    "{% for document in documents %}\n",
    "    {{document.content}}\n",
    "{% endfor %}\n",
    "Answer the given question: {{query}}\n",
    "Answer:\n",
    "\"\"\"\n",
    "prompt_builder = PromptBuilder(template=prompt_template)\n",
    "llm = OpenAIGenerator(\n",
    "    api_key=Secret.from_env_var(\"SAMBANOVA_API_KEY\"),\n",
    "    api_base_url=\"https://api.sambanova.ai/v1/\",\n",
    "    model=\"Meta-Llama-3.3-70B-Instruct\",\n",
    "    generation_kwargs={\"max_tokens\": 1024}\n",
    ")\n",
"pipeline = Pipeline()\n", | ||
"pipeline.add_component(\"fetcher\", fetcher)\n", | ||
"pipeline.add_component(\"converter\", converter)\n", | ||
"pipeline.add_component(\"prompt\", prompt_builder)\n", | ||
"pipeline.add_component(\"llm\", llm)\n", | ||
"\n", | ||
"pipeline.connect(\"fetcher.streams\", \"converter.sources\")\n", | ||
"pipeline.connect(\"converter.documents\", \"prompt.documents\")\n", | ||
"pipeline.connect(\"prompt.prompt\", \"llm.prompt\")\n", | ||
"\n", | ||
"result = pipeline.run({\"fetcher\": {\"urls\": [\"https://community.sambanova.ai/t/sambanova-cloud-quick-start-guide/194\"]},\n", | ||
" \"prompt\": {\"query\": \"How do I make a request using the SambaNova API?\"}})\n", | ||
"\n", | ||
"print(result[\"llm\"][\"replies\"][0])" | ||
] | ||
}, | ||
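  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The answer printed above describes two ways to call the SambaNova API directly. As a minimal, hedged sketch of Option 2 (separate from the Haystack pipeline), the next cell calls the same OpenAI-compatible endpoint and model used in this notebook through the `openai` client library; the `openai` package itself is an assumed extra dependency, not something the original example installs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch (assumption: `pip install openai` has been run) of calling the\n",
    "# SambaNova OpenAI-compatible endpoint directly, as described in the answer above.\n",
    "import os\n",
    "from openai import OpenAI\n",
    "\n",
    "client = OpenAI(\n",
    "    api_key=os.environ[\"SAMBANOVA_API_KEY\"],\n",
    "    base_url=\"https://api.sambanova.ai/v1\",\n",
    ")\n",
    "completion = client.chat.completions.create(\n",
    "    model=\"Meta-Llama-3.3-70B-Instruct\",\n",
    "    messages=[{\"role\": \"user\", \"content\": \"How do I make a request using the SambaNova API?\"}],\n",
    "    max_tokens=256,\n",
    ")\n",
    "print(completion.choices[0].message.content)"
   ]
  },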
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example question answering using RAG"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Based on the given information, the SambaNova Cloud API has the following functionalities:\\n\\n1. Inference API (OpenAI compatible) for seamless text and image processing.\\n2. Function-Calling API for dynamic, agentic workflows, allowing models to suggest and select function calls based on user input.\\n3. Playground for interacting with multimodal models directly. \\n\\nAdditionally, it provides Python and Gradio code samples to simplify integrating AI models, enabling faster prototyping and reducing setup time.']\n"
     ]
    }
   ],
"source": [ | ||
"from haystack import Document, Pipeline\n", | ||
"from haystack.components.builders.prompt_builder import PromptBuilder\n", | ||
"from haystack.components.retrievers.in_memory import InMemoryBM25Retriever\n", | ||
"from haystack.document_stores.in_memory import InMemoryDocumentStore\n", | ||
"\n", | ||
"from haystack.components.generators import OpenAIGenerator\n", | ||
"import getpass\n", | ||
"import os\n", | ||
"\n", | ||
"if \"SAMBANOVA_API_KEY\" not in os.environ:\n", | ||
" os.environ[\"SAMBANOVA_API_KEY\"] = getpass.getpass(\"insert your SambaNovaCloud API Key\")\n", | ||
"\n", | ||
"document_store = InMemoryDocumentStore()\n", | ||
"document_store.write_documents(\n", | ||
" [\n", | ||
" Document(content=\"The Function-Calling API enables dynamic, agentic workflows by allowing the model to suggest and select function calls based on user input.\"\n", | ||
" \"This feature facilitates flexible agentic workflows that adapt to varied needs.\"),\n", | ||
" Document(content=\"Interact with multimodal models directly through the Inference API (OpenAI compatible) and Playground\"\n", | ||
" \"for seamless text and image processing.\"),\n", | ||
" Document(\n", | ||
" content=\"New Python and Gradio code samples make it easier to build and deploy applications on SambaNova Cloud. These examples simplify\"\n", | ||
" \"integrating AI models, enabling faster prototyping and reducing setup time.\"\n", | ||
" ),\n", | ||
" ]\n", | ||
")\n", | ||
"\n", | ||
"template = \"\"\"\n", | ||
"Given only the following information, answer the question.\n", | ||
"Ignore your own knowledge.\n", | ||
"\n", | ||
"Context:\n", | ||
"{% for document in documents %}\n", | ||
" {{ document.content }}\n", | ||
"{% endfor %}\n", | ||
"\n", | ||
"Question: {{ query }}?\n", | ||
"\"\"\"\n", | ||
"\n", | ||
"llm = OpenAIGenerator(\n", | ||
" api_key=Secret.from_env_var(\"SAMBANOVA_API_KEY\"),\n", | ||
" api_base_url=\"https://api.sambanova.ai/v1\",\n", | ||
" model=\"Meta-Llama-3.3-70B-Instruct\",\n", | ||
" generation_kwargs = {\"max_tokens\": 1024}\n", | ||
")\n", | ||
"\n", | ||
"pipe = Pipeline()\n", | ||
"\n", | ||
"pipe.add_component(\"retriever\", InMemoryBM25Retriever(document_store=document_store))\n", | ||
"pipe.add_component(\"prompt_builder\", PromptBuilder(template=template))\n", | ||
"pipe.add_component(\"llm\", llm)\n", | ||
"pipe.connect(\"retriever\", \"prompt_builder.documents\")\n", | ||
"pipe.connect(\"prompt_builder\", \"llm\")\n", | ||
"\n", | ||
"query = \"Functionalities of Sambanova API?\"\n", | ||
"\n", | ||
"response = pipe.run({\"prompt_builder\": {\"query\": query}, \"retriever\": {\"query\": query}})\n", | ||
"\n", | ||
"print(response[\"llm\"][\"replies\"])" | ||
] | ||
} | ||
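  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make the RAG step above more transparent, the following cell is a small, hedged sketch (not part of the original example) that runs `InMemoryBM25Retriever` on its own against the same `document_store`, so you can inspect which documents the retriever would hand to the prompt for a given query."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: run the BM25 retriever standalone (outside the pipeline) to inspect\n",
    "# which documents would be passed into the prompt for this query.\n",
    "standalone_retriever = InMemoryBM25Retriever(document_store=document_store)\n",
    "retrieved = standalone_retriever.run(query=query)\n",
    "\n",
    "for doc in retrieved[\"documents\"]:\n",
    "    print(doc.score, \"-\", doc.content[:80])"
   ]
  }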
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "haystack_test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}