From 22480a9d3dc149c0d9cb2542293a251f9f9cd696 Mon Sep 17 00:00:00 2001 From: vual <25891219+vual@users.noreply.github.com> Date: Mon, 25 Nov 2024 02:53:07 +0800 Subject: [PATCH] =?UTF-8?q?=F0=9F=92=84=20style:=20support=20to=20reset=20?= =?UTF-8?q?fetched=20models=20(#4260)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 增加【清除获取的模型】的功能,主要是用于在获取模型列表后,不想要了,需要恢复系统自带的模型。 * revert * 🌐 chore: update locale * 🎨 refactor: refactor code --------- Co-authored-by: arvinxx --- docs/self-hosting/platform/alibaba-cloud.mdx | 5 +++- docs/self-hosting/platform/btpanel.mdx | 6 +++- docs/self-hosting/platform/btpanel.zh-CN.mdx | 4 ++- docs/self-hosting/server-database.mdx | 2 +- locales/ar/models.json | 2 ++ locales/ar/setting.json | 1 + locales/bg-BG/models.json | 2 ++ locales/bg-BG/setting.json | 1 + locales/de-DE/models.json | 2 ++ locales/de-DE/setting.json | 1 + locales/en-US/models.json | 2 ++ locales/en-US/setting.json | 1 + locales/es-ES/models.json | 2 ++ locales/es-ES/setting.json | 1 + locales/fa-IR/models.json | 2 ++ locales/fa-IR/setting.json | 1 + locales/fr-FR/models.json | 2 ++ locales/fr-FR/setting.json | 1 + locales/it-IT/models.json | 2 ++ locales/it-IT/setting.json | 1 + locales/ja-JP/models.json | 2 ++ locales/ja-JP/setting.json | 1 + locales/ko-KR/models.json | 2 ++ locales/ko-KR/setting.json | 1 + locales/nl-NL/models.json | 2 ++ locales/nl-NL/setting.json | 1 + locales/pl-PL/models.json | 2 ++ locales/pl-PL/setting.json | 1 + locales/pt-BR/models.json | 2 ++ locales/pt-BR/setting.json | 1 + locales/ru-RU/models.json | 2 ++ locales/ru-RU/setting.json | 1 + locales/tr-TR/models.json | 2 ++ locales/tr-TR/setting.json | 1 + locales/vi-VN/models.json | 2 ++ locales/vi-VN/setting.json | 1 + locales/zh-CN/models.json | 6 ++-- locales/zh-CN/setting.json | 1 + locales/zh-TW/models.json | 2 ++ locales/zh-TW/setting.json | 1 + .../ProviderModelList/ModelFetcher.tsx | 29 +++++++++++++++---- src/app/layout.tsx | 6 ++-- src/locales/default/setting.ts | 1 + src/store/user/slices/modelList/action.ts | 17 ++++++++++- 44 files changed, 113 insertions(+), 15 deletions(-) diff --git a/docs/self-hosting/platform/alibaba-cloud.mdx b/docs/self-hosting/platform/alibaba-cloud.mdx index b3d1e891389d..1919ec05053e 100644 --- a/docs/self-hosting/platform/alibaba-cloud.mdx +++ b/docs/self-hosting/platform/alibaba-cloud.mdx @@ -1,6 +1,9 @@ --- title: Deploy LobeChat on Alibaba Cloud -description: Learn how to deploy the LobeChat application on Alibaba Cloud, including preparing the large model API Key, clicking the deploy button, and other operations. +description: >- + Learn how to deploy the LobeChat application on Alibaba Cloud, including + preparing the large model API Key, clicking the deploy button, and other + operations. tags: - Alibaba Cloud - LobeChat diff --git a/docs/self-hosting/platform/btpanel.mdx b/docs/self-hosting/platform/btpanel.mdx index 5b0431848f20..7eee0918ffa6 100644 --- a/docs/self-hosting/platform/btpanel.mdx +++ b/docs/self-hosting/platform/btpanel.mdx @@ -1,6 +1,10 @@ --- title: Deploy LobeChat using aaPanel -description: Learn how to deploy the LobeChat service using aaPanel-Docker, including installing the Docker container environment and using the command to start the service with one click. Detailed instructions on how to configure environment variables and use proxy addresses. 
+description: >- + Learn how to deploy the LobeChat service using aaPanel-Docker, including + installing the Docker container environment and using the command to start the + service with one click. Detailed instructions on how to configure environment + variables and use proxy addresses. tags: - Docker - LobeChat diff --git a/docs/self-hosting/platform/btpanel.zh-CN.mdx b/docs/self-hosting/platform/btpanel.zh-CN.mdx index c90a0583910a..634c98b87f24 100644 --- a/docs/self-hosting/platform/btpanel.zh-CN.mdx +++ b/docs/self-hosting/platform/btpanel.zh-CN.mdx @@ -1,6 +1,8 @@ --- title: 通过 宝塔面板Docker应用商店 部署 LobeChat -description: 学习如何使用 宝塔面板Docker应用 部署 LobeChat 服务,包括安装 Docker 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。 +description: >- + 学习如何使用 宝塔面板Docker应用 部署 LobeChat 服务,包括安装 Docker + 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。 tags: - Docker - LobeChat diff --git a/docs/self-hosting/server-database.mdx b/docs/self-hosting/server-database.mdx index 97483e745de3..7cae138a4d28 100644 --- a/docs/self-hosting/server-database.mdx +++ b/docs/self-hosting/server-database.mdx @@ -141,4 +141,4 @@ For detailed configuration guidelines on S3, please refer to [S3 Object Storage] The above is a detailed explanation of configuring LobeChat with a server-side database. You can configure it according to your actual situation and then choose a deployment platform that suits you to start deployment: - \ No newline at end of file + diff --git a/locales/ar/models.json b/locales/ar/models.json index 70d45dd20bf9..4312d48cdb9e 100644 --- a/locales/ar/models.json +++ b/locales/ar/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO هو دمج متعدد النماذج مرن للغاية، يهدف إلى تقديم تجربة إبداعية ممتازة." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) هو نموذج تعليمات عالي الدقة، مناسب للحسابات المعقدة." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 هو نموذج لغوي كبير مفتوح (LLM) موجه للمطورين والباحثين والشركات، يهدف إلى مساعدتهم في بناء وتجربة وتوسيع أفكارهم في الذكاء الاصطناعي بشكل مسؤول. كجزء من نظام الابتكار المجتمعي العالمي، فهو مثالي للأجهزة ذات القدرة الحاسوبية والموارد المحدودة، والأجهزة الطرفية، وأوقات التدريب الأسرع." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B هو أحدث نموذج خفيف الوزن وسريع من Microsoft AI، ويقترب أداؤه من 10 أضعاف النماذج الرائدة المفتوحة المصدر الحالية." }, diff --git a/locales/ar/setting.json b/locales/ar/setting.json index 3f80b632682c..22d305ec75ae 100644 --- a/locales/ar/setting.json +++ b/locales/ar/setting.json @@ -98,6 +98,7 @@ "title": "استخدام طريقة طلب العميل" }, "fetcher": { + "clear": "مسح النموذج المستخرج", "fetch": "احصل على قائمة النماذج", "fetching": "جاري الحصول على قائمة النماذج...", "latestTime": "آخر تحديث: {{time}}", diff --git a/locales/bg-BG/models.json b/locales/bg-BG/models.json index de80d5fc30c3..85bf28ee7aa2 100644 --- a/locales/bg-BG/models.json +++ b/locales/bg-BG/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO е високо гъвкава многомоделна комбинация, предназначена да предостави изключителен креативен опит." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) е модел с висока точност за инструкции, подходящ за сложни изчисления." 
}, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 е отворен голям езиков модел (LLM), насочен към разработчици, изследователи и предприятия, предназначен да им помогне да изградят, експериментират и отговорно разширят своите идеи за генеративен ИИ. Като част от основната система на глобалната общност за иновации, той е особено подходящ за устройства с ограничени изчислителни ресурси и по-бързо време за обучение." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B е най-новият бърз и лек модел на Microsoft AI, с производителност, близка до 10 пъти на съществуващите водещи отворени модели." }, diff --git a/locales/bg-BG/setting.json b/locales/bg-BG/setting.json index e5af36ad81c3..7e144703733f 100644 --- a/locales/bg-BG/setting.json +++ b/locales/bg-BG/setting.json @@ -98,6 +98,7 @@ "title": "Използване на режим на заявка от клиента" }, "fetcher": { + "clear": "Изчисти получената модел", "fetch": "Изтегляне на списъка с модели", "fetching": "Изтегляне на списъка с модели...", "latestTime": "Последно актуализирано: {{time}}", diff --git a/locales/de-DE/models.json b/locales/de-DE/models.json index d6fe33c6eb5f..16d770c0deeb 100644 --- a/locales/de-DE/models.json +++ b/locales/de-DE/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO ist eine hochflexible Multi-Modell-Kombination, die darauf abzielt, außergewöhnliche kreative Erlebnisse zu bieten." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) ist ein hochpräzises Anweisungsmodell, das für komplexe Berechnungen geeignet ist." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 ist ein offenes großes Sprachmodell (LLM), das sich an Entwickler, Forscher und Unternehmen richtet und ihnen hilft, ihre Ideen für generative KI zu entwickeln, zu experimentieren und verantwortungsbewusst zu skalieren. Als Teil eines globalen Innovationssystems ist es besonders geeignet für Umgebungen mit begrenzter Rechenleistung und Ressourcen, für Edge-Geräte und schnellere Trainingszeiten." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B ist das neueste schnelle und leichte Modell von Microsoft AI, dessen Leistung fast zehnmal so hoch ist wie die bestehender führender Open-Source-Modelle." }, diff --git a/locales/de-DE/setting.json b/locales/de-DE/setting.json index 0e3b1a6e3e50..d0a071e90661 100644 --- a/locales/de-DE/setting.json +++ b/locales/de-DE/setting.json @@ -98,6 +98,7 @@ "title": "Client Fetch-Modus verwenden" }, "fetcher": { + "clear": "Abgerufenes Modell löschen", "fetch": "Modelle abrufen", "fetching": "Modelle werden abgerufen...", "latestTime": "Letzte Aktualisierung: {{time}}", diff --git a/locales/en-US/models.json b/locales/en-US/models.json index 9c7c2de2ea12..c002c472d7bb 100644 --- a/locales/en-US/models.json +++ b/locales/en-US/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO is a highly flexible multi-model fusion designed to provide an exceptional creative experience." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) is a high-precision instruction model suitable for complex computations." 
}, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 is an open large language model (LLM) aimed at developers, researchers, and enterprises, designed to help them build, experiment, and responsibly scale their generative AI ideas. As part of a foundational system for global community innovation, it is particularly suitable for those with limited computational power and resources, edge devices, and faster training times." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B is Microsoft's latest lightweight AI model, performing nearly ten times better than existing leading open-source models." }, diff --git a/locales/en-US/setting.json b/locales/en-US/setting.json index e700f9f66542..5f29e6b4b06f 100644 --- a/locales/en-US/setting.json +++ b/locales/en-US/setting.json @@ -98,6 +98,7 @@ "title": "Use Client-Side Fetching Mode" }, "fetcher": { + "clear": "Clear fetched model", "fetch": "Get Model List", "fetching": "Fetching Model List...", "latestTime": "Last Updated: {{time}}", diff --git a/locales/es-ES/models.json b/locales/es-ES/models.json index 4efbc6ec80c0..6a6ffeb73544 100644 --- a/locales/es-ES/models.json +++ b/locales/es-ES/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO es una fusión de múltiples modelos altamente flexible, diseñada para ofrecer una experiencia creativa excepcional." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) es un modelo de instrucciones de alta precisión, adecuado para cálculos complejos." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 es un modelo de lenguaje de gran tamaño (LLM) abierto dirigido a desarrolladores, investigadores y empresas, diseñado para ayudarles a construir, experimentar y escalar de manera responsable sus ideas de IA generativa. Como parte de un sistema base para la innovación de la comunidad global, es ideal para dispositivos de borde con recursos y capacidades computacionales limitadas, así como para tiempos de entrenamiento más rápidos." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B es el último modelo ligero y rápido de Microsoft AI, con un rendimiento cercano a 10 veces el de los modelos líderes de código abierto existentes." }, diff --git a/locales/es-ES/setting.json b/locales/es-ES/setting.json index ce1008003d0b..6607820a53c9 100644 --- a/locales/es-ES/setting.json +++ b/locales/es-ES/setting.json @@ -98,6 +98,7 @@ "title": "Usar el modo de solicitud en el cliente" }, "fetcher": { + "clear": "Eliminar el modelo obtenido", "fetch": "Obtener lista de modelos", "fetching": "Obteniendo lista de modelos...", "latestTime": "Última actualización: {{time}}", diff --git a/locales/fa-IR/models.json b/locales/fa-IR/models.json index 2484f11749b5..2c96966edfbc 100644 --- a/locales/fa-IR/models.json +++ b/locales/fa-IR/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO یک ترکیب چندمدلی بسیار انعطاف‌پذیر است که برای ارائه تجربه‌ای خلاقانه و برجسته طراحی شده است." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) یک مدل دستورالعمل با دقت بالا است که برای محاسبات پیچیده مناسب است." 
}, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 یک مدل زبان بزرگ باز (LLM) است که برای توسعه‌دهندگان، پژوهشگران و شرکت‌ها طراحی شده است تا به آن‌ها در ساخت، آزمایش و گسترش مسئولانه ایده‌های هوش مصنوعی مولد کمک کند. به عنوان بخشی از سیستم پایه نوآوری جامعه جهانی، این مدل برای دستگاه‌های با توان محاسباتی و منابع محدود، دستگاه‌های لبه و زمان‌های آموزش سریع‌تر بسیار مناسب است." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B مدل جدید و سبک وزن AI مایکروسافت است که عملکرد آن نزدیک به 10 برابر مدل‌های پیشرو متن‌باز موجود است." }, diff --git a/locales/fa-IR/setting.json b/locales/fa-IR/setting.json index ac484eed8935..e618023e66ea 100644 --- a/locales/fa-IR/setting.json +++ b/locales/fa-IR/setting.json @@ -98,6 +98,7 @@ "title": "استفاده از حالت درخواست از سمت کلاینت" }, "fetcher": { + "clear": "پاک کردن مدل‌های دریافت شده", "fetch": "دریافت لیست مدل‌ها", "fetching": "در حال دریافت لیست مدل‌ها...", "latestTime": "آخرین زمان به‌روزرسانی: {{time}}", diff --git a/locales/fr-FR/models.json b/locales/fr-FR/models.json index 588bff4670cd..ff9f81bbdf7a 100644 --- a/locales/fr-FR/models.json +++ b/locales/fr-FR/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO est une fusion de modèles hautement flexible, visant à offrir une expérience créative exceptionnelle." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) est un modèle d'instructions de haute précision, adapté aux calculs complexes." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 est un modèle de langage ouvert (LLM) destiné aux développeurs, chercheurs et entreprises, conçu pour les aider à construire, expérimenter et étendre de manière responsable leurs idées d'IA générative. En tant que partie intégrante d'un système de base pour l'innovation de la communauté mondiale, il est particulièrement adapté aux appareils à capacité de calcul et de ressources limitées, ainsi qu'à des temps d'entraînement plus rapides." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B est le dernier modèle léger et rapide de Microsoft AI, offrant des performances proches de dix fois celles des modèles leaders open source existants." }, diff --git a/locales/fr-FR/setting.json b/locales/fr-FR/setting.json index 48c4c3f6d63a..3a6112f8676a 100644 --- a/locales/fr-FR/setting.json +++ b/locales/fr-FR/setting.json @@ -98,6 +98,7 @@ "title": "Utiliser le mode de requête client" }, "fetcher": { + "clear": "Effacer le modèle récupéré", "fetch": "Obtenir la liste des modèles", "fetching": "Récupération de la liste des modèles en cours...", "latestTime": "Dernière mise à jour : {{time}}", diff --git a/locales/it-IT/models.json b/locales/it-IT/models.json index b302db6fc37c..861aa57d022c 100644 --- a/locales/it-IT/models.json +++ b/locales/it-IT/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO è un modello altamente flessibile, progettato per offrire un'esperienza creativa eccezionale." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) è un modello di istruzioni ad alta precisione, adatto per calcoli complessi." 
}, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 è un modello di linguaggio di grandi dimensioni (LLM) open source progettato per sviluppatori, ricercatori e aziende, per aiutarli a costruire, sperimentare e scalare responsabilmente le loro idee di AI generativa. Come parte di un sistema di base per l'innovazione della comunità globale, è particolarmente adatto per dispositivi a bassa potenza e risorse limitate, oltre a garantire tempi di addestramento più rapidi." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B è il modello leggero e veloce più recente di Microsoft AI, con prestazioni vicine a quelle dei modelli leader open source esistenti." }, diff --git a/locales/it-IT/setting.json b/locales/it-IT/setting.json index 4e9396121998..6e317ccbb21a 100644 --- a/locales/it-IT/setting.json +++ b/locales/it-IT/setting.json @@ -98,6 +98,7 @@ "title": "Utilizzo del modo di richiesta del client" }, "fetcher": { + "clear": "Cancella il modello ottenuto", "fetch": "Ottenere l'elenco dei modelli", "fetching": "Recupero dell'elenco dei modelli in corso...", "latestTime": "Ultimo aggiornamento: {{time}}", diff --git a/locales/ja-JP/models.json b/locales/ja-JP/models.json index bf81d722e6f5..6efc76ca55a2 100644 --- a/locales/ja-JP/models.json +++ b/locales/ja-JP/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPOは非常に柔軟なマルチモデル統合で、卓越した創造的体験を提供することを目的としています。" }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)は、高精度の指示モデルであり、複雑な計算に適しています。" }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3は、開発者、研究者、企業向けのオープンな大規模言語モデル(LLM)であり、生成AIのアイデアを構築、実験、責任を持って拡張するのを支援することを目的としています。世界的なコミュニティの革新の基盤システムの一部として、計算能力とリソースが限られたエッジデバイスや、より迅速なトレーニング時間に非常に適しています。" }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7BはMicrosoft AIの最新の高速軽量モデルで、既存のオープンソースリーダーモデルの10倍に近い性能を持っています。" }, diff --git a/locales/ja-JP/setting.json b/locales/ja-JP/setting.json index 8d1b59b4af32..d13e48096716 100644 --- a/locales/ja-JP/setting.json +++ b/locales/ja-JP/setting.json @@ -98,6 +98,7 @@ "title": "クライアントサイドリクエストモードの使用" }, "fetcher": { + "clear": "取得したモデルをクリア", "fetch": "モデルリストを取得する", "fetching": "モデルリストを取得中...", "latestTime": "最終更新時間:{{time}}", diff --git a/locales/ko-KR/models.json b/locales/ko-KR/models.json index f83616e6b54e..4647ae0ee8d1 100644 --- a/locales/ko-KR/models.json +++ b/locales/ko-KR/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO는 뛰어난 창의적 경험을 제공하기 위해 설계된 고도로 유연한 다중 모델 통합입니다." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)는 고정밀 지시 모델로, 복잡한 계산에 적합합니다." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3은 개발자, 연구자 및 기업을 위한 오픈 대형 언어 모델(LLM)로, 생성 AI 아이디어를 구축하고 실험하며 책임감 있게 확장하는 데 도움을 주기 위해 설계되었습니다. 전 세계 커뮤니티 혁신의 기초 시스템의 일환으로, 계산 능력과 자원이 제한된 환경, 엣지 장치 및 더 빠른 훈련 시간에 매우 적합합니다." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B는 Microsoft AI의 최신 경량 모델로, 기존 오픈 소스 선도 모델의 성능에 근접합니다." 
}, diff --git a/locales/ko-KR/setting.json b/locales/ko-KR/setting.json index 56b9c05a0c92..c1058f8efa2a 100644 --- a/locales/ko-KR/setting.json +++ b/locales/ko-KR/setting.json @@ -98,6 +98,7 @@ "title": "클라이언트 요청 모드 사용" }, "fetcher": { + "clear": "가져온 모델 지우기", "fetch": "모델 목록 가져오기", "fetching": "모델 목록을 가져오는 중...", "latestTime": "마지막 업데이트 시간: {{time}}", diff --git a/locales/nl-NL/models.json b/locales/nl-NL/models.json index 2eaeadcdda22..3cbe517d5d66 100644 --- a/locales/nl-NL/models.json +++ b/locales/nl-NL/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO is een zeer flexibele multi-model combinatie, ontworpen om een uitstekende creatieve ervaring te bieden." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) is een hoogprecisie instructiemodel, geschikt voor complexe berekeningen." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 is een open groot taalmodel (LLM) gericht op ontwikkelaars, onderzoekers en bedrijven, ontworpen om hen te helpen bij het bouwen, experimenteren en verantwoordelijk opschalen van hun generatieve AI-ideeën. Als onderdeel van het basis systeem voor wereldwijde gemeenschapsinnovatie is het zeer geschikt voor apparaten met beperkte rekenkracht en middelen, edge-apparaten en snellere trainingstijden." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B is het nieuwste snelle en lichte model van Microsoft AI, met prestaties die bijna 10 keer beter zijn dan de huidige toonaangevende open-source modellen." }, diff --git a/locales/nl-NL/setting.json b/locales/nl-NL/setting.json index fa5a0b99f913..25c28c4ecee2 100644 --- a/locales/nl-NL/setting.json +++ b/locales/nl-NL/setting.json @@ -98,6 +98,7 @@ "title": "Gebruik de ophaalmodus aan de clientzijde" }, "fetcher": { + "clear": "Verwijder opgehaalde model", "fetch": "Haal model lijst op", "fetching": "Model lijst wordt opgehaald...", "latestTime": "Laatst bijgewerkt: {{time}}", diff --git a/locales/pl-PL/models.json b/locales/pl-PL/models.json index 30fe99f899d5..0cdef041e56b 100644 --- a/locales/pl-PL/models.json +++ b/locales/pl-PL/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO to wysoce elastyczna fuzja wielu modeli, mająca na celu zapewnienie doskonałego doświadczenia twórczego." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) to model poleceń o wysokiej precyzji, idealny do złożonych obliczeń." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 to otwarty duży model językowy (LLM) skierowany do deweloperów, badaczy i przedsiębiorstw, mający na celu pomoc w budowaniu, eksperymentowaniu i odpowiedzialnym rozwijaniu ich pomysłów na generatywną sztuczną inteligencję. Jako część podstawowego systemu innowacji globalnej społeczności, jest idealny dla urządzeń o ograniczonej mocy obliczeniowej i zasobach, a także dla szybszego czasu szkolenia." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B to najnowszy szybki i lekki model AI od Microsoftu, osiągający wydajność bliską 10-krotności istniejących wiodących modeli open source." 
}, diff --git a/locales/pl-PL/setting.json b/locales/pl-PL/setting.json index f036d98b36fd..aa51e095d993 100644 --- a/locales/pl-PL/setting.json +++ b/locales/pl-PL/setting.json @@ -98,6 +98,7 @@ "title": "使用客户端请求模式" }, "fetcher": { + "clear": "Wyczyść pobrany model", "fetch": "Pobierz listę modeli", "fetching": "Trwa pobieranie listy modeli...", "latestTime": "Ostatnia aktualizacja: {{time}}", diff --git a/locales/pt-BR/models.json b/locales/pt-BR/models.json index 9fb5132afa9e..211ce0f50801 100644 --- a/locales/pt-BR/models.json +++ b/locales/pt-BR/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO é uma fusão de múltiplos modelos altamente flexível, projetada para oferecer uma experiência criativa excepcional." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) é um modelo de instrução de alta precisão, adequado para cálculos complexos." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 é um modelo de linguagem de grande escala (LLM) aberto voltado para desenvolvedores, pesquisadores e empresas, projetado para ajudá-los a construir, experimentar e expandir suas ideias de IA geradora de forma responsável. Como parte de um sistema de base para inovação da comunidade global, é ideal para dispositivos de borda com capacidade de computação e recursos limitados, além de tempos de treinamento mais rápidos." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B é o modelo leve e rápido mais recente da Microsoft AI, com desempenho próximo a 10 vezes o de modelos de código aberto existentes." }, diff --git a/locales/pt-BR/setting.json b/locales/pt-BR/setting.json index 1c950d217713..508d72e31909 100644 --- a/locales/pt-BR/setting.json +++ b/locales/pt-BR/setting.json @@ -98,6 +98,7 @@ "title": "Usar o modo de solicitação do cliente" }, "fetcher": { + "clear": "Limpar o modelo obtido", "fetch": "Obter lista de modelos", "fetching": "Obtendo lista de modelos...", "latestTime": "Última atualização: {{time}}", diff --git a/locales/ru-RU/models.json b/locales/ru-RU/models.json index 75329553b962..b499ea6f4577 100644 --- a/locales/ru-RU/models.json +++ b/locales/ru-RU/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO — это высокоадаптивная многомодельная комбинация, предназначенная для предоставления выдающегося творческого опыта." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) — это высокоточная модель команд, подходящая для сложных вычислений." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 — это открытая большая языковая модель (LLM), ориентированная на разработчиков, исследователей и предприятия, предназначенная для помощи в создании, экспериментировании и ответственном масштабировании их идей по генеративному ИИ. В качестве части базовой системы для инноваций глобального сообщества она идеально подходит для устройств с ограниченными вычислительными мощностями и ресурсами, а также для более быстрого времени обучения." 
}, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B — это новая быстрая и легкая модель от Microsoft AI, производительность которой близка к 10-кратной производительности существующих открытых моделей." }, diff --git a/locales/ru-RU/setting.json b/locales/ru-RU/setting.json index 8fa445679697..3eb8599d808b 100644 --- a/locales/ru-RU/setting.json +++ b/locales/ru-RU/setting.json @@ -98,6 +98,7 @@ "title": "Использовать режим запроса с клиента" }, "fetcher": { + "clear": "Очистить полученную модель", "fetch": "Получить список моделей", "fetching": "Идет получение списка моделей...", "latestTime": "Последнее обновление: {{time}}", diff --git a/locales/tr-TR/models.json b/locales/tr-TR/models.json index 1a89531b6f56..31be6def624b 100644 --- a/locales/tr-TR/models.json +++ b/locales/tr-TR/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO, olağanüstü yaratıcı deneyimler sunmak için tasarlanmış son derece esnek bir çoklu model birleşimidir." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B), karmaşık hesaplamalar için yüksek hassasiyetli bir talimat modelidir." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3, geliştiriciler, araştırmacılar ve işletmeler için açık bir büyük dil modelidir (LLM) ve onların üretken AI fikirlerini inşa etmelerine, denemelerine ve sorumlu bir şekilde genişletmelerine yardımcı olmak için tasarlanmıştır. Küresel topluluk yeniliğinin temel sistemlerinden biri olarak, sınırlı hesaplama gücü ve kaynaklara sahip, kenar cihazları ve daha hızlı eğitim süreleri için son derece uygundur." }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B, Microsoft AI'nın en son hızlı ve hafif modelidir ve mevcut açık kaynak lider modellerin performansına yakın bir performans sunmaktadır." }, diff --git a/locales/tr-TR/setting.json b/locales/tr-TR/setting.json index eeab91f2c81c..2d2474700a04 100644 --- a/locales/tr-TR/setting.json +++ b/locales/tr-TR/setting.json @@ -98,6 +98,7 @@ "title": "İstemci Tarafından Veri Alımı" }, "fetcher": { + "clear": "Alınan modeli temizle", "fetch": "Modelleri Al", "fetching": "Modelleri alınıyor...", "latestTime": "Son güncelleme zamanı: {{time}}", diff --git a/locales/vi-VN/models.json b/locales/vi-VN/models.json index d55a063386e2..4746ffff5cc2 100644 --- a/locales/vi-VN/models.json +++ b/locales/vi-VN/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO là một mô hình kết hợp đa dạng, nhằm cung cấp trải nghiệm sáng tạo xuất sắc." }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) là mô hình chỉ dẫn chính xác cao, phù hợp cho tính toán phức tạp." }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 là một mô hình ngôn ngữ lớn (LLM) mở dành cho các nhà phát triển, nhà nghiên cứu và doanh nghiệp, nhằm giúp họ xây dựng, thử nghiệm và mở rộng ý tưởng AI sinh một cách có trách nhiệm. Là một phần của hệ thống cơ sở hạ tầng đổi mới toàn cầu, nó rất phù hợp cho các thiết bị biên và thời gian huấn luyện nhanh hơn với khả năng tính toán và tài nguyên hạn chế." 
}, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B là mô hình nhẹ và nhanh mới nhất của Microsoft AI, hiệu suất gần gấp 10 lần so với các mô hình mở nguồn hiện có." }, diff --git a/locales/vi-VN/setting.json b/locales/vi-VN/setting.json index e8ff9d6613e2..aa14d7e5b4d3 100644 --- a/locales/vi-VN/setting.json +++ b/locales/vi-VN/setting.json @@ -98,6 +98,7 @@ "title": "Sử dụng chế độ yêu cầu từ khách hàng" }, "fetcher": { + "clear": "Xóa mô hình đã lấy", "fetch": "Lấy danh sách mô hình", "fetching": "Đang lấy danh sách mô hình...", "latestTime": "Thời gian cập nhật lần cuối: {{time}}", diff --git a/locales/zh-CN/models.json b/locales/zh-CN/models.json index aba349d33a79..ae220c63fb03 100644 --- a/locales/zh-CN/models.json +++ b/locales/zh-CN/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度灵活的多模型合并,旨在提供卓越的创造性体验。" }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,适用于复杂计算。" }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合计算能力和资源有限、边缘设备和更快的训练时间。" }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B 是微软AI最新的快速轻量化模型,性能接近于现有开源领导模型的10倍。" }, @@ -883,10 +885,10 @@ "description": "Mistral (7B) Instruct 以高性能著称,适用于多种语言任务。" }, "mistralai/Mistral-7B-Instruct-v0.2": { - "description": "Mistral AI的指令调优模型" + "description": "Mistral (7B) Instruct v0.2 提供改进的指令处理能力和更精确的结果。" }, "mistralai/Mistral-7B-Instruct-v0.3": { - "description": "Mistral (7B) Instruct v0.3 提供高效的计算能力和自然语言理解,适合广泛的应用。" + "description": "Mistral AI的指令调优模型" }, "mistralai/Mistral-7B-v0.1": { "description": "Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。" diff --git a/locales/zh-CN/setting.json b/locales/zh-CN/setting.json index f7bad12ca4f2..8091c7067bf6 100644 --- a/locales/zh-CN/setting.json +++ b/locales/zh-CN/setting.json @@ -98,6 +98,7 @@ "title": "使用客户端请求模式" }, "fetcher": { + "clear": "清除获取的模型", "fetch": "获取模型列表", "fetching": "正在获取模型列表...", "latestTime": "上次更新时间:{{time}}", diff --git a/locales/zh-TW/models.json b/locales/zh-TW/models.json index 05a99d3b60f2..93fbcfa318cf 100644 --- a/locales/zh-TW/models.json +++ b/locales/zh-TW/models.json @@ -94,6 +94,7 @@ "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度靈活的多模型合併,旨在提供卓越的創造性體驗。" }, + "NousResearch/Hermes-3-Llama-3.1-8B": {}, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,適用於複雜計算。" }, @@ -846,6 +847,7 @@ "meta.llama3-8b-instruct-v1:0": { "description": "Meta Llama 3 是一款面向開發者、研究人員和企業的開放大型語言模型 (LLM),旨在幫助他們構建、實驗並負責任地擴展他們的生成 AI 想法。作為全球社區創新的基礎系統的一部分,它非常適合計算能力和資源有限、邊緣設備和更快的訓練時間。" }, + "microsoft/Phi-3.5-mini-instruct": {}, "microsoft/wizardlm 2-7b": { "description": "WizardLM 2 7B 是微軟AI最新的快速輕量化模型,性能接近於現有開源領導模型的10倍。" }, diff --git a/locales/zh-TW/setting.json b/locales/zh-TW/setting.json index a56006c189a0..79b126e42ae9 100644 --- a/locales/zh-TW/setting.json +++ b/locales/zh-TW/setting.json @@ -98,6 +98,7 @@ "title": "使用客戶端請求模式" }, "fetcher": { + "clear": "清除獲取的模型", "fetch": "獲取模型列表", "fetching": "正在獲取模型列表...", "latestTime": "上次更新時間:{{time}}", diff --git a/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx 
b/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx index 3b12b6d0df3c..c10b7aafd963 100644 --- a/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx +++ b/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx @@ -1,8 +1,9 @@ -import { Icon, Tooltip } from '@lobehub/ui'; +import { ActionIcon, Icon, Tooltip } from '@lobehub/ui'; import { Typography } from 'antd'; import { createStyles } from 'antd-style'; import dayjs from 'dayjs'; -import { LucideLoaderCircle, LucideRefreshCcwDot } from 'lucide-react'; +import isEqual from 'fast-deep-equal'; +import { CircleX, LucideLoaderCircle, LucideRefreshCcwDot } from 'lucide-react'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { Flexbox } from 'react-layout-kit'; @@ -40,8 +41,9 @@ interface ModelFetcherProps { const ModelFetcher = memo(({ provider }) => { const { styles } = useStyles(); const { t } = useTranslation('setting'); - const [useFetchProviderModelList] = useUserStore((s) => [ + const [useFetchProviderModelList, clearObtainedModels] = useUserStore((s) => [ s.useFetchProviderModelList, + s.clearObtainedModels, s.setModelProviderConfig, ]); const enabledAutoFetch = useUserStore(modelConfigSelectors.isAutoFetchModelsEnabled(provider)); @@ -52,19 +54,34 @@ const ModelFetcher = memo(({ provider }) => { (s) => modelProviderSelectors.getModelCardsById(provider)(s).length, ); + const remoteModels = useUserStore( + modelProviderSelectors.remoteProviderModelCards(provider), + isEqual, + ); + const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch); return ( -
-        <div>{t('llm.modelList.total', { count: totalModels })}</div>
+        <Flexbox align={'center'} horizontal>
+          <div>{t('llm.modelList.total', { count: totalModels })}</div>
+          {remoteModels && remoteModels.length > 0 && (
+            <ActionIcon
+              icon={CircleX}
+              onClick={() => clearObtainedModels(provider)}
+              size={'small'}
+              title={t('llm.fetcher.clear')}
+            />
+          )}
+        </Flexbox>
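A note on the hunk above: the reset button only renders when the provider actually has remotely fetched model cards, which is what the new `remoteModels` subscription checks. The sketch below restates that gating check as a standalone hook; it is illustrative rather than part of the patch, and the import paths (`@/store/user`, `@/store/user/selectors`) and the hook name `useHasRemoteModels` are assumptions.

import { useUserStore } from '@/store/user';
import { modelProviderSelectors } from '@/store/user/selectors';

// Illustrative hook, not in this patch: true when the provider has model cards
// fetched from its remote /models endpoint, i.e. when the reset button should show.
// The store code types `provider` more strictly (GlobalLLMProviderKey).
export const useHasRemoteModels = (provider: string) =>
  useUserStore(
    (s) => (modelProviderSelectors.remoteProviderModelCards(provider)(s) ?? []).length > 0,
  );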
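The `clearObtainedModels` action added in src/store/user/slices/modelList/action.ts further below can also be exercised outside the settings component, for example from a test. A minimal sketch, assuming the store is exported from `@/store/user`; the provider id 'openai' is only an example.

import { useUserStore } from '@/store/user';

// clearObtainedModels empties remoteModelCards in the provider config and then
// refreshes the default provider list, so the UI falls back to the built-in cards.
const resetOpenAIModels = async () => {
  await useUserStore.getState().clearObtainedModels('openai');
};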
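The other action.ts change below is what makes the reset actually take effect: the old `serverChatModels ?? remoteChatModels ?? providerCard.chatModels` chain only falls through on null or undefined, so the emptied `remoteModelCards: []` written by `clearObtainedModels` would have masked the built-in list instead of restoring it. A self-contained sketch of the new precedence rule; the type and function names here are illustrative stand-ins for the store's own.

interface ChatModelCard {
  id: string;
}

// New precedence: a non-empty server-configured list wins, then a non-empty
// remotely fetched list, otherwise the provider's built-in cards.
const pickChatModels = (
  serverModels: ChatModelCard[] | undefined,
  remoteModels: ChatModelCard[] | undefined,
  builtinModels: ChatModelCard[],
): ChatModelCard[] => {
  if (serverModels && serverModels.length > 0) return serverModels;
  if (remoteModels && remoteModels.length > 0) return remoteModels;
  return builtinModels;
};

// After resetting, remoteModelCards is [], so the built-in list comes back:
pickChatModels(undefined, [], [{ id: 'gpt-4o-mini' }]); // -> [{ id: 'gpt-4o-mini' }]
// The old ?? chain would have returned [] here, leaving the provider with no models.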
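Finally, the src/app/layout.tsx hunk below is a small cleanup with a user-visible effect: the locale is resolved once and reused, so the `dir` and `lang` attributes on the root element can no longer disagree when the locale cookie is missing. A standalone sketch of that resolution step; the `rtl-detect` import and the 'en-US' default are assumptions for illustration.

import { isRtlLang } from 'rtl-detect';

const DEFAULT_LANG = 'en-US'; // stand-in for the constant imported in layout.tsx

// Resolve the locale once, then derive text direction from it, so the lang and
// dir attributes always describe the same locale.
const resolveHtmlAttrs = (cookieLocale?: string) => {
  const locale = cookieLocale || DEFAULT_LANG;
  return { dir: isRtlLang(locale) ? 'rtl' : 'ltr', lang: locale } as const;
};

// resolveHtmlAttrs('ar')        -> { dir: 'rtl', lang: 'ar' }
// resolveHtmlAttrs(undefined)   -> { dir: 'ltr', lang: 'en-US' }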
diff --git a/src/app/layout.tsx b/src/app/layout.tsx index df39d0f4923f..ddd34bcf2731 100644 --- a/src/app/layout.tsx +++ b/src/app/layout.tsx @@ -22,11 +22,13 @@ const RootLayout = async ({ children, modal }: RootLayoutProps) => { const cookieStore = await cookies(); const lang = cookieStore.get(LOBE_LOCALE_COOKIE); - const direction = isRtlLang(lang?.value || DEFAULT_LANG) ? 'rtl' : 'ltr'; + const locale = lang?.value || DEFAULT_LANG; + + const direction = isRtlLang(locale) ? 'rtl' : 'ltr'; const mobile = isMobileDevice(); return ( - + diff --git a/src/locales/default/setting.ts b/src/locales/default/setting.ts index 6800285e4c69..f6a9418b5e43 100644 --- a/src/locales/default/setting.ts +++ b/src/locales/default/setting.ts @@ -100,6 +100,7 @@ export default { title: '使用客户端请求模式', }, fetcher: { + clear: '清除获取的模型', fetch: '获取模型列表', fetching: '正在获取模型列表...', latestTime: '上次更新时间:{{time}}', diff --git a/src/store/user/slices/modelList/action.ts b/src/store/user/slices/modelList/action.ts index b1b1cb3670ff..2a0ab9f3d4b8 100644 --- a/src/store/user/slices/modelList/action.ts +++ b/src/store/user/slices/modelList/action.ts @@ -20,6 +20,7 @@ import { modelProviderSelectors } from './selectors/modelProvider'; * 设置操作 */ export interface ModelListAction { + clearObtainedModels: (provider: GlobalLLMProviderKey) => Promise; dispatchCustomModelCards: ( provider: GlobalLLMProviderKey, payload: CustomModelCardDispatch, @@ -61,6 +62,13 @@ export const createModelListSlice: StateCreator< [], ModelListAction > = (set, get) => ({ + clearObtainedModels: async (provider: GlobalLLMProviderKey) => { + await get().setModelProviderConfig(provider, { + remoteModelCards: [], + }); + + get().refreshDefaultModelProviderList(); + }, dispatchCustomModelCards: async (provider, payload) => { const prevState = settingsSelectors.providerConfig(provider)(get()); @@ -86,7 +94,14 @@ export const createModelListSlice: StateCreator< ? modelProviderSelectors.remoteProviderModelCards(providerKey)(get()) : undefined; - return serverChatModels ?? remoteChatModels ?? providerCard.chatModels; + if (serverChatModels && serverChatModels.length > 0) { + return serverChatModels; + } + if (remoteChatModels && remoteChatModels.length > 0) { + return remoteChatModels; + } + + return providerCard.chatModels; }; const defaultModelProviderList = produce(DEFAULT_MODEL_PROVIDER_LIST, (draft) => {