From 31ef9135e722c034f1281addc37027be7a6368e9 Mon Sep 17 00:00:00 2001
From: Samuel Devdas <84823680+SamuelDevdas@users.noreply.github.com>
Date: Tue, 13 Aug 2024 08:28:14 +0200
Subject: [PATCH] Update Config params when using Local Ollama models (#1690)
---
docs/open-source/quickstart.mdx | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/docs/open-source/quickstart.mdx b/docs/open-source/quickstart.mdx
index 7a4842646b..7d6248ed1b 100644
--- a/docs/open-source/quickstart.mdx
+++ b/docs/open-source/quickstart.mdx
@@ -206,18 +206,32 @@ import os
from mem0 import Memory
config = {
- "vector_store":{
+ "vector_store": {
"provider": "qdrant",
"config": {
- "embedding_model_dims": 768 # change according to embedding model
- }
+ "collection_name": "test",
+ "host": "localhost",
+ "port": 6333,
+            "embedding_model_dims": 768,  # 768 for nomic-embed-text; change this to match your local embedding model's output dimensions
+ },
},
"llm": {
- "provider": "ollama"
+ "provider": "ollama",
+ "config": {
+ "model": "llama3.1:latest",
+ "temperature": 0,
+ "max_tokens": 8000,
+            "ollama_base_url": "http://localhost:11434",  # Adjust if your Ollama server runs on a different host or port
+ },
},
"embedder": {
- "provider": "ollama"
- }
+ "provider": "ollama",
+ "config": {
+ "model": "nomic-embed-text:latest",
+ # "model": "snowflake-arctic-embed:latest",
+ "ollama_base_url": "http://localhost:11434",
+ },
+ },
}
m = Memory.from_config(config)
@@ -325,4 +339,4 @@ print(history)
If you have any questions, please feel free to reach out to us using one of the following methods:
-
\ No newline at end of file
+