
Commit 7017e64

update name for models
hahuyhoang411 committed Dec 4, 2023
1 parent 84c940b commit 7017e64
Showing 21 changed files with 23 additions and 23 deletions.
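
The pattern behind these renames is mechanical: each model's "name" gains the quantization level (Q4 or Q5) that is already encoded in the GGUF filename of its "source_url". A minimal sketch of how such a pass could be scripted follows; it is not part of this commit, and the models/*/model.json glob, the regular expression, and the quant_suffix helper are illustrative assumptions.

import json
import re
from pathlib import Path

# Hypothetical helper: pull "Q4"/"Q5" out of GGUF filenames such as
# "nous-capybara-34b.Q5_K_M.gguf" or "ggml-model-q4_0.gguf".
QUANT_RE = re.compile(r"\.(q\d)_[a-z0-9_]+\.gguf$", re.IGNORECASE)

def quant_suffix(source_url):
    match = QUANT_RE.search(source_url)
    return match.group(1).upper() if match else None

for model_file in sorted(Path("models").glob("*/model.json")):
    data = json.loads(model_file.read_text())
    suffix = quant_suffix(data.get("source_url", ""))
    if suffix and not data["name"].endswith(suffix):
        # e.g. "Yi 34B" becomes "Yi 34B Q5"; rewriting uses 2-space indent like the originals
        data["name"] = f'{data["name"]} {suffix}'
        model_file.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n")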
2 changes: 1 addition & 1 deletion models/capybara-34b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
"id": "capybara-34b",
"object": "model",
"name": "Capybara 200k 34B",
"name": "Capybara 200k 34B Q5",
"version": "1.0",
"description": "Nous Capybara 34B, a variant of the Yi-34B model, is the first Nous model with a 200K context length, trained for three epochs on the innovative Capybara dataset.",
"format": "gguf",
4 changes: 2 additions & 2 deletions models/deepseek-coder-1.3b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-base-GGUF/resolve/main/deepseek-coder-1.3b-base.Q4_K_M.gguf",
"id": "deepseek-coder-1.3b",
"object": "model",
"name": "Deepseek Coder 1.3B",
"name": "Deepseek Coder 1.3B Q4",
"version": "1.0",
"description": "",
"format": "gguf",
@@ -16,7 +16,7 @@
"max_tokens": 2048
},
"metadata": {
"author": "deepseek, The Bloke",
"author": "Deepseek, The Bloke",
"tags": ["Community Recommended", "Code", "Small size"],
"size": 870000000
}
2 changes: 1 addition & 1 deletion models/llama2-chat-70b-q4/model.json
@@ -18,7 +18,7 @@
"metadata": {
"author": "MetaAI, The Bloke",
"tags": ["Foundational Model", "General", "Code"],
"size": 4080000000
"size": 43920000000
}
}

2 changes: 1 addition & 1 deletion models/llama2-chat-7b-q4/model.json
@@ -4,7 +4,7 @@
"object": "model",
"name": "Llama 2 Chat 7B Q4",
"version": "1.0",
"description": "This is a 4-bit quantized version of Meta AI's Llama 2 Chat 7b model.",
"description": "This is a 4-bit quantized iteration of Meta AI's Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
2 changes: 1 addition & 1 deletion models/llama2-chat-7b-q5/model.json
@@ -4,7 +4,7 @@
"object": "model",
"name": "Llama 2 Chat 7B Q5",
"version": "1.0",
"description": "This is a 5-bit quantized version of Meta AI's Llama 2 Chat 7b model.",
"description": "This is a 5-bit quantized iteration of Meta AI's Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
2 changes: 1 addition & 1 deletion models/lzlv-70b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/lzlv_70B-GGUF/resolve/main/lzlv_70b_fp16_hf.Q5_K_M.gguf",
"id": "lzlv-70b",
"object": "model",
"name": "Lzlv 70B",
"name": "Lzlv 70B Q4",
"version": "1.0",
"description": "lzlv_70B is a sophisticated AI model designed for roleplaying and creative tasks. This merge aims to combine intelligence with creativity, seemingly outperforming its individual components in complex scenarios and creative outputs.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/mistral-ins-7b-q4/model.json
@@ -4,7 +4,7 @@
"object": "model",
"name": "Mistral Instruct 7B Q4",
"version": "1.0",
"description": "This is a 4-bit quantized version of MistralAI's Mistral Instruct 7B model.",
"description": "This is a 4-bit quantized iteration of MistralAI's Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
2 changes: 1 addition & 1 deletion models/mistral-ins-7b-q5/model.json
@@ -4,7 +4,7 @@
"object": "model",
"name": "Mistral Instruct 7B Q5",
"version": "1.0",
"description": "This is a 5-bit quantized version of MistralAI's Mistral Instruct 7B model.",
"description": "This is a 5-bit quantized iteration of MistralAI's Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
2 changes: 1 addition & 1 deletion models/neural-chat-7b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/neural-chat-7B-v3-1-GGUF/resolve/main/neural-chat-7b-v3-1.Q4_K_M.gguf",
"id": "neural-chat-7b",
"object": "model",
"name": "Neural Chat 7B",
"name": "Neural Chat 7B Q4",
"version": "1.0",
"description": "The Neural Chat 7B model, developed on the foundation of mistralai/Mistral-7B-v0.1, has been fine-tuned using the Open-Orca/SlimOrca dataset and aligned with the Direct Preference Optimization (DPO) algorithm. It has demonstrated substantial improvements in various AI tasks and performance well on the open_llm_leaderboard.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/noromaid-20b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Noromaid-20B-v0.1.1-GGUF/resolve/main/noromaid-20b-v0.1.1.Q4_K_M.gguf",
"id": "noromaid-20b",
"object": "model",
"name": "Noromaid 20B",
"name": "Noromaid 20B Q4",
"version": "1.0",
"description": "The Noromaid 20b model is designed for role-playing and general use, featuring a unique touch with the no_robots dataset that enhances human-like behavior.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/openhermes-neural-7b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-neural-chat-7B-v3-2-7B-GGUF/resolve/main/openhermes-2.5-neural-chat-7b-v3-2-7b.Q4_K_M.gguf",
"id": "openhermes-neural-7b",
"object": "model",
"name": "OpenHermes Neural 7B",
"name": "OpenHermes Neural 7B Q4",
"version": "1.0",
"description": "OpenHermes Neural is a merged model from OpenHermes-2.5-Mistral-7B and neural-chat-7b-v3-2 with the TIES method.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/openorca-13b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Orca-2-13B-GGUF/resolve/main/orca-2-13b.Q5_K_M.gguf",
"id": "openorca-13b",
"object": "model",
"name": "Orca 2 13B",
"name": "Orca 2 13B Q5",
"version": "1.0",
"description": "Orca 2 is a finetuned version of LLAMA-2, designed primarily for single-turn responses in reasoning, reading comprehension, math problem solving, and text summarization.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/phind-34b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
"id": "phind-34b",
"object": "model",
"name": "Phind 34B",
"name": "Phind 34B Q5",
"version": "1.0",
"description": "Phind-CodeLlama-34B-v2 is an AI model fine-tuned on 1.5B tokens of high-quality programming data. It's a SOTA open-source model in coding. This multi-lingual model excels in various programming languages, including Python, C/C++, TypeScript, Java, and is designed to be steerable and user-friendly.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/rocket-3b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/rocket-3B-GGUF/resolve/main/rocket-3b.Q4_K_M.gguf",
"id": "rocket-3b",
"object": "model",
"name": "Rocket 3B",
"name": "Rocket 3B Q4",
"version": "1.0",
"description": "Rocket-3B is a GPT-like model, primarily English, fine-tuned on diverse public datasets. It outperforms larger models in benchmarks, showcasing superior understanding and text generation, making it an effective chat model for its size.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/starling-7b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
"id": "starling-7b",
"object": "model",
"name": "Strarling alpha 7B",
"name": "Strarling alpha 7B Q4",
"version": "1.0",
"description": "Starling-RM-7B-alpha is a language model finetuned with Reinforcement Learning from AI Feedback from Openchat 3.5. It stands out for its impressive performance using GPT-4 as a judge, making it one of the top-performing models in its category.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/tiefighter-13b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/LLaMA2-13B-Tiefighter-GGUF/resolve/main/llama2-13b-tiefighter.Q5_K_M.gguf",
"id": "tiefighter-13b",
"object": "model",
"name": "Tiefighter 13B",
"name": "Tiefighter 13B Q5",
"version": "1.0",
"description": "Tiefighter-13B is a highly creative, merged AI model achieved by combining various 'LORAs' on top of an existing merge, particularly focusing on storytelling and improvisation. This model excels in story writing, chatbots, and adventuring, and is designed to perform better with less detailed inputs, leveraging its inherent creativity.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/tinyllama-1.1b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6/resolve/main/ggml-model-q4_0.gguf",
"id": "tinyllama-1.1b",
"object": "model",
"name": "TinyLlama Chat 1.1B",
"name": "TinyLlama Chat 1.1B Q4",
"version": "1.0",
"description": "The TinyLlama project, featuring a 1.1B parameter Llama model, is pretrained on an expansive 3 trillion token dataset. Its design ensures easy integration with various Llama-based open-source projects. Despite its smaller size, it efficiently utilizes lower computational and memory resources, drawing on GPT-4's analytical prowess to enhance its conversational abilities and versatility.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/wizardcoder-13b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-13b",
"object": "model",
"name": "Wizard Coder Python 13B",
"name": "Wizard Coder Python 13B Q5",
"version": "1.0",
"description": "WizardCoder-Python-13B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/wizardcoder-34b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF/resolve/main/wizardcoder-python-34b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-34b",
"object": "model",
"name": "Wizard Coder Python 34B",
"name": "Wizard Coder Python 34B Q5",
"version": "1.0",
"description": "WizardCoder-Python-34B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
2 changes: 1 addition & 1 deletion models/yi-34b/model.json
@@ -2,7 +2,7 @@
"source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
"id": "yi-34b",
"object": "model",
"name": "Yi 34B",
"name": "Yi 34B Q5",
"version": "1.0",
"description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
"format": "gguf",
4 changes: 2 additions & 2 deletions models/zephyr-beta-7b/model.json
@@ -2,9 +2,9 @@
"source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q4_K_M.gguf",
"id": "zephyr-beta-7b",
"object": "model",
"name": "Zephyr Beta 7B",
"name": "Zephyr Beta 7B Q4",
"version": "1.0",
"description": "The Zephyr-7B-β model marks the second iteration in the Zephyr series, designed to function as an effective assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
"description": "The Zephyr-7B-β model is trained by HuggingFace, designed to function as a practical assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
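
A companion consistency check is just as short; again a hypothetical sketch rather than a script from this repository. It compares the quantization tag in each "name" against the tag in the corresponding "source_url", and on the files above it would flag the lzlv-70b entry, whose source_url points at a Q5_K_M file while its new name reads "Lzlv 70B Q4".

import json
import re
from pathlib import Path

for model_file in sorted(Path("models").glob("*/model.json")):
    data = json.loads(model_file.read_text())
    url_tag = re.search(r"\.(q\d)_", data.get("source_url", ""), re.IGNORECASE)
    name_tag = re.search(r"\bQ\d\b", data.get("name", ""))
    if url_tag and (name_tag is None or name_tag.group(0) != url_tag.group(1).upper()):
        # Report entries whose display name disagrees with the quant level in the GGUF filename.
        print(f"{model_file}: name={data['name']!r} url tag={url_tag.group(1).upper()}")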

