diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index d5a8856dc788..0eb98d8e0fa7 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Changed
 - Update Italian translation ([#3236](https://github.com/nomic-ai/gpt4all/pull/3236))
 
+### Fixed
+- Fix a few more problems with the Jinja changes ([#3239](https://github.com/nomic-ai/gpt4all/pull/3239))
+
 ## [3.5.0-rc2] - 2024-12-06
 
 ### Changed
diff --git a/gpt4all-chat/qml/ChatItemView.qml b/gpt4all-chat/qml/ChatItemView.qml
index 65ffe53fc093..e6a48bbc108e 100644
--- a/gpt4all-chat/qml/ChatItemView.qml
+++ b/gpt4all-chat/qml/ChatItemView.qml
@@ -599,7 +599,14 @@ GridLayout {
             Layout.fillWidth: false
             name: editingDisabledReason ?? qsTr("Redo")
             source: "qrc:/gpt4all/icons/regenerate.svg"
-            onClicked: redoResponseDialog.open()
+            onClicked: {
+                if (index == chatModel.count - 1) {
+                    // regenerate last message without confirmation
+                    currentChat.regenerateResponse(index);
+                    return;
+                }
+                redoResponseDialog.open();
+            }
         }
 
         ChatMessageButton {
diff --git a/gpt4all-chat/src/chatllm.cpp b/gpt4all-chat/src/chatllm.cpp
index 7841b9460e99..f575ac2d4f11 100644
--- a/gpt4all-chat/src/chatllm.cpp
+++ b/gpt4all-chat/src/chatllm.cpp
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -70,19 +71,23 @@ static jinja2::TemplateEnv *jinjaEnv()
     settings.lstripBlocks = true;
     env.AddGlobal("raise_exception", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
-            auto &message = params.args.at("message").asString();
-            throw std::runtime_error(fmt::format("Jinja template error: {}", message));
+            auto messageArg = params.args.find("message");
+            if (messageArg == params.args.end() || !messageArg->second.isString())
+                throw std::runtime_error("'message' argument to raise_exception() must be a string");
+            throw std::runtime_error(fmt::format("Jinja template error: {}", messageArg->second.asString()));
         },
         /*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
     ));
     env.AddGlobal("strftime_now", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
             using Clock = std::chrono::system_clock;
-            auto &format = params.args.at("format").asString();
+            auto formatArg = params.args.find("format");
+            if (formatArg == params.args.end() || !formatArg->second.isString())
+                throw std::runtime_error("'format' argument to strftime_now() must be a string");
             time_t nowUnix = Clock::to_time_t(Clock::now());
             auto localDate = *std::localtime(&nowUnix);
             std::ostringstream ss;
-            ss << std::put_time(&localDate, format.c_str());
+            ss << std::put_time(&localDate, formatArg->second.asString().c_str());
             return ss.str();
         },
         /*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
@@ -923,7 +928,7 @@ auto ChatLLM::promptInternal(
         if (auto limit = nCtx - 4; lastMessageLength > limit) {
             throw std::invalid_argument(
                 tr("Your message was too long and could not be processed (%1 > %2). "
-                   "Please try again with something shorter.").arg(lastMessageLength, limit).toUtf8().constData()
+                   "Please try again with something shorter.").arg(lastMessageLength).arg(limit).toUtf8().constData()
             );
         }
     }
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index a11229a6ae35..99b515eeff71 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -47,8 +47,34 @@ using namespace Qt::Literals::StringLiterals;
 
 #define MODELS_JSON_VERSION "3"
 
 static const QStringList FILENAME_BLACKLIST { u"gpt4all-nomic-embed-text-v1.rmodel"_s };
 
+static const QString RMODEL_CHAT_TEMPLATE = uR"(
+{%- set loop_messages = messages %}
+{%- for message in loop_messages %}
+    {%- if message['role'] != 'user' and message['role'] != 'assistant' %}
+        {{- raise_exception('Unknown role: ' + message['role']) }}
+    {%- endif %}
+    {{- '<' + message['role'] + '>' }}
+    {%- if message['role'] == 'user' %}
+        {%- for source in message.sources %}
+            {%- if loop.first %}
+                {{- '### Context:\n' }}
+            {%- endif %}
+            {{- 'Collection: ' + source.collection + '\n' +
+                'Path: ' + source.path + '\n' +
+                'Excerpt: ' + source.text + '\n\n' }}
+        {%- endfor %}
+    {%- endif %}
+    {%- for attachment in message.prompt_attachments %}
+        {{- attachment.processed_content + '\n\n' }}
+    {%- endfor %}
+    {{- message.content }}
+    {{- '</' + message['role'] + '>' }}
+{%- endfor %}
+)"_s;
+
+
 QString ModelInfo::id() const
 {
     return m_id;
@@ -1367,6 +1392,7 @@ void ModelList::processModelDirectory(const QString &path)
                 // The description is hard-coded into "GPT4All.ini" due to performance issue.
                 // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList.
                 data.append({ DescriptionRole, description });
+                data.append({ ChatTemplateRole, RMODEL_CHAT_TEMPLATE });
             }
             updateData(id, data);
         }
@@ -1655,7 +1681,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "GPT" },
-            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1683,7 +1710,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "GPT" },
-            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1714,7 +1742,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "Mistral" },
-            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1739,7 +1768,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "Mistral" },
-            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1765,7 +1795,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "Mistral" },
-            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+            { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
         };
         updateData(id, data);
     }
@@ -1794,6 +1825,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::ParametersRole, "?" },
             { ModelList::QuantRole, "NA" },
             { ModelList::TypeRole, "NA" },
+            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
        };
         updateData(id, data);
     }
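
Review note on the `chatllm.cpp` change above: Qt's `QString::arg(int, int)` overload treats the second integer as a `fieldWidth`, not as the value for `%2`, so the original `.arg(lastMessageLength, limit)` left `%2` unsubstituted and space-padded `%1` to the width given by `limit`. A minimal sketch of the pitfall (the `len`/`limit` values are illustrative, not taken from the patch):

```cpp
#include <QString>
#include <QDebug>

int main()
{
    const int len = 5000, limit = 4092; // illustrative values only

    // Buggy: arg(int, int) parses the second integer as fieldWidth, so %1 is
    // space-padded to width 4092 and %2 survives verbatim into the output.
    QString bad = QString("too long (%1 > %2)").arg(len, limit);

    // Fixed, as in the patch: chain arg() so each placeholder gets its own value.
    QString good = QString("too long (%1 > %2)").arg(len).arg(limit);

    qDebug() << bad;  // "too long (        ...        5000 > %2)"
    qDebug() << good; // "too long (5000 > 4092)"
    return 0;
}
```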
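Review note on the `UserCallable` changes: `params.args.at("message")` throws a bare `std::out_of_range` when the argument is absent, and `asString()` on a non-string `jinja2::Value` fails without naming the offending template call; the patch replaces both with a `find()` plus `isString()` check. A hedged sketch of how that repeated pattern could be factored into a helper (the helper name and the `<jinja2cpp/value.h>` include path are assumptions; `params.args`, `isString()`, and `asString()` are the same jinja2cpp API the patch itself uses):

```cpp
#include <stdexcept>
#include <string>

#include <jinja2cpp/value.h>

// Hypothetical helper mirroring the patch's validation pattern: look the
// argument up with find() instead of at(), confirm it holds a string, and
// report which template-visible function received the bad argument.
static std::string requireStringArg(const jinja2::UserCallableParams &params,
                                    const std::string &arg, const std::string &fn)
{
    auto it = params.args.find(arg);
    if (it == params.args.end() || !it->second.isString())
        throw std::runtime_error("'" + arg + "' argument to " + fn + "() must be a string");
    return it->second.asString();
}
```

With such a helper, each global would reduce to a single line, e.g. `auto message = requireStringArg(params, "message", "raise_exception");`.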