Update Config params when using Local Ollama models (mem0ai#1690)
SamuelDevdas authored Aug 13, 2024
1 parent 5cea479 commit 31ef913
Showing 1 changed file with 21 additions and 7 deletions.
28 changes: 21 additions & 7 deletions docs/open-source/quickstart.mdx
@@ -206,18 +206,32 @@ import os
 from mem0 import Memory
 
 config = {
-    "vector_store":{
+    "vector_store": {
         "provider": "qdrant",
         "config": {
-            "embedding_model_dims": 768 # change according to embedding model
-        }
+            "collection_name": "test",
+            "host": "localhost",
+            "port": 6333,
+            "embedding_model_dims": 768,  # 768 for nomic-embed-text; change to match your local embedding model's dimensions
+        },
     },
     "llm": {
-        "provider": "ollama"
+        "provider": "ollama",
+        "config": {
+            "model": "llama3.1:latest",
+            "temperature": 0,
+            "max_tokens": 8000,
+            "ollama_base_url": "http://localhost:11434",  # Ensure this is correct
+        },
     },
     "embedder": {
-        "provider": "ollama"
-    }
+        "provider": "ollama",
+        "config": {
+            "model": "nomic-embed-text:latest",
+            # "model": "snowflake-arctic-embed:latest",
+            "ollama_base_url": "http://localhost:11434",
+        },
+    },
 }
 
 m = Memory.from_config(config)
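
For reference, the configuration reads as follows once the hunk above is applied — a minimal runnable sketch, assuming a local Qdrant instance on port 6333 and an Ollama server on port 11434 with the referenced models already pulled. The trailing m.add call is a hypothetical usage example in the style of the surrounding quickstart, not part of this commit.

from mem0 import Memory

# Full config after this commit: Qdrant as the vector store, with a
# local Ollama server handling both chat completions and embeddings.
config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # must match the embedder's output size
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:latest",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            "ollama_base_url": "http://localhost:11434",
        },
    },
}

m = Memory.from_config(config)

# Hypothetical usage example (not part of this diff), following the quickstart:
result = m.add("Likes to play cricket on weekends", user_id="alice")
print(result)

Before running it, make sure Qdrant is reachable and the models have been pulled (e.g. ollama pull nomic-embed-text), otherwise the calls to the Ollama base URL will fail.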
@@ -325,4 +339,4 @@ print(history)
 
 If you have any questions, please feel free to reach out to us using one of the following methods:
 
-<Snippet file="get-help.mdx" />
\ No newline at end of file
+<Snippet file="get-help.mdx" />
