Skip to content

Commit

Permalink
Merge pull request janhq#3751 from janhq/chore/add-new-models
Browse files Browse the repository at this point in the history
chore: add llama3.2 and qwen2.5 models
  • Loading branch information
louis-jan authored Oct 2, 2024
2 parents db0997f + 6082959 commit 0bc2f82
Show file tree
Hide file tree
Showing 16 changed files with 278 additions and 22 deletions.
2 changes: 1 addition & 1 deletion extensions/inference-nitro-extension/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "@janhq/inference-cortex-extension",
"productName": "Cortex Inference Engine",
"version": "1.0.18",
"version": "1.0.19",
"description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
"main": "dist/index.js",
"node": "dist/node/index.cjs.js",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "command-r-34b",
"object": "model",
"name": "Command-R v01 34B Q4",
"version": "1.5",
"version": "1.6",
"description": "C4AI Command-R developed by CohereAI is optimized for a variety of use cases including reasoning, summarization, and question answering.",
"format": "gguf",
"settings": {
Expand All @@ -28,7 +28,7 @@
},
"metadata": {
"author": "CohereAI",
"tags": ["34B", "Finetuned", "Featured"],
"tags": ["34B", "Finetuned"],
"size": 21500000000
},
"engine": "nitro"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "gemma-2-27b-it",
"object": "model",
"name": "Gemma 2 27B Q4",
"version": "1.0",
"version": "1.1",
"description": "Gemma is built from the same technology as Google's Gemini.",
"format": "gguf",
"settings": {
Expand All @@ -33,8 +33,7 @@
"tags": [
"27B",
"Conversational",
"Text-generation",
"Featured"
"Text-generation"
],
"size": 16600000000
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "gemma-2-2b-it",
"object": "model",
"name": "Gemma 2 2B Q4",
"version": "1.0",
"version": "1.1",
"description": "Gemma is built from the same technology as Google's Gemini.",
"format": "gguf",
"settings": {
Expand All @@ -34,8 +34,7 @@
"2B",
"Tiny",
"Conversational",
"Text-generation",
"Featured"
"Text-generation"
],
"size": 1710000000
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "gemma-2-9b-it",
"object": "model",
"name": "Gemma 2 9B Q4",
"version": "1.0",
"version": "1.1",
"description": "Gemma is built from the same technology as Google's Gemini.",
"format": "gguf",
"settings": {
Expand All @@ -33,8 +33,7 @@
"tags": [
"9B",
"Conversational",
"Text-generation",
"Featured"
"Text-generation"
],
"size": 5760000000
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "llama3.1-70b-instruct",
"object": "model",
"name": "Llama 3.1 70B Instruct Q4",
"version": "1.1",
"version": "1.2",
"description": "Meta's Llama 3.1 excels at general usage situations, including chat, general world knowledge, and coding.",
"format": "gguf",
"settings": {
Expand All @@ -33,8 +33,7 @@
"metadata": {
"author": "MetaAI",
"tags": [
"70B",
"Featured"
"70B"
],
"size": 42500000000
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "llama3.1-8b-instruct",
"object": "model",
"name": "Llama 3.1 8B Instruct Q4",
"version": "1.1",
"version": "1.2",
"description": "Meta's Llama 3.1 excels at general usage situations, including chat, general world knowledge, and coding.",
"format": "gguf",
"settings": {
Expand All @@ -33,8 +33,7 @@
"metadata": {
"author": "MetaAI",
"tags": [
"8B",
"Featured"
"8B", "Featured"
],
"size": 4920000000
},
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "Llama-3.2-1B-Instruct-Q8_0.gguf",
"url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q8_0.gguf"
}
],
"id": "llama3.2-1b-instruct",
"object": "model",
"name": "Llama 3.2 1B Instruct Q8",
"version": "1.0",
"description": "Meta's Llama 3.2 excels at general usage situations, including chat, general world knowledge, and coding.",
"format": "gguf",
"settings": {
"ctx_len": 131072,
"prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"llama_model_path": "Llama-3.2-1B-Instruct-Q8_0.gguf",
"ngl": 33
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 8192,
"stop": ["<|end_of_text|>", "<|eot_id|>", "<|eom_id|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "MetaAI",
"tags": ["1B", "Featured"],
"size": 1320000000
},
"engine": "nitro"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "Llama-3.2-3B-Instruct-Q8_0.gguf",
"url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q8_0.gguf"
}
],
"id": "llama3.2-3b-instruct",
"object": "model",
"name": "Llama 3.2 3B Instruct Q8",
"version": "1.0",
"description": "Meta's Llama 3.2 excels at general usage situations, including chat, general world knowledge, and coding.",
"format": "gguf",
"settings": {
"ctx_len": 131072,
"prompt_template": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"llama_model_path": "Llama-3.2-3B-Instruct-Q8_0.gguf",
"ngl": 33
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 8192,
"stop": ["<|end_of_text|>", "<|eot_id|>", "<|eom_id|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "MetaAI",
"tags": ["3B", "Featured"],
"size": 3420000000
},
"engine": "nitro"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2.5-14B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF/resolve/main/Qwen2.5-14B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2.5-14b-instruct",
"object": "model",
"name": "Qwen2.5 14B Instruct Q4",
"version": "1.0",
"description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2.5-14B-Instruct-Q4_K_M.gguf",
"ngl": 49
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": ["<|endoftext|>", "<|im_end|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "QwenLM",
"tags": ["14B", "Featured"],
"size": 8990000000
},
"engine": "nitro"
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2.5-32B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2.5-32B-Instruct-GGUF/resolve/main/Qwen2.5-32B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2.5-32b-instruct",
"object": "model",
"name": "Qwen2.5 32B Instruct Q4",
"version": "1.0",
"description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2.5-32B-Instruct-Q4_K_M.gguf",
"ngl": 65
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": ["<|endoftext|>", "<|im_end|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "QwenLM",
"tags": ["32B"],
"size": 19900000000
},
"engine": "nitro"
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2.5-72B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2.5-72B-Instruct-GGUF/resolve/main/Qwen2.5-72B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2.5-72b-instruct",
"object": "model",
"name": "Qwen2.5 72B Instruct Q4",
"version": "1.0",
"description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2.5-72B-Instruct-Q4_K_M.gguf",
"ngl": 81
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": ["<|endoftext|>", "<|im_end|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "QwenLM",
"tags": ["72B"],
"size": 47400000000
},
"engine": "nitro"
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2.5-7B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2.5-7B-Instruct-GGUF/resolve/main/Qwen2.5-7B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2.5-7b-instruct",
"object": "model",
"name": "Qwen2.5 7B Instruct Q4",
"version": "1.0",
"description": "The Qwen 2.5 family is specifically designed to excel in math, coding, and other computational tasks.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2.5-7B-Instruct-Q4_K_M.gguf",
"ngl": 29
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": ["<|endoftext|>", "<|im_end|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "QwenLM",
"tags": ["7B", "Featured"],
"size": 4680000000
},
"engine": "nitro"
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2.5-coder-7b-instruct",
"object": "model",
"name": "Qwen2.5 Coder 7B Instruct Q4",
"version": "1.0",
"description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models. Significant improvements in code generation, code reasoning, and code fixing.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf",
"ngl": 29
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": ["<|endoftext|>", "<|im_end|>"],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "QwenLM",
"tags": ["7B", "Featured"],
"size": 4680000000
},
"engine": "nitro"
}

Loading

0 comments on commit 0bc2f82

Please sign in to comment.