From 92acc7b3acb925fcb4d4f969f9aab7585de99372 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel <jared@nomic.ai>
Date: Tue, 3 Dec 2024 19:36:53 -0500
Subject: [PATCH] Fixups for Jinja PR (#3215)

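The fixups, as reflected in the diff below: invoke wget.exe explicitly
in the Windows CI job (plain wget is shadowed by a PowerShell alias),
enable MSVC's /Zc:__cplusplus for Jinja2Cpp, vendor a rapidjson fork as
a new submodule and configure it ahead of Jinja2Cpp, apply the default
install prefix before the deps are added, limit CPack to the gpt4all
component, let ChatModel::appendResponse() derive the prompt index
itself instead of taking it as a parameter, and add the missing
parameter list to a lambda in modellist.cpp.
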
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
---
 .circleci/continue_config.yml    |  2 +-
 .gitmodules                      |  3 +++
 gpt4all-chat/CMakeLists.txt      | 13 +++++++++----
 gpt4all-chat/deps/CMakeLists.txt |  7 +++++++
 gpt4all-chat/deps/Jinja2Cpp      |  2 +-
 gpt4all-chat/deps/rapidjson      |  1 +
 gpt4all-chat/src/chat.cpp        |  7 +++----
 gpt4all-chat/src/chatmodel.h     | 15 ++++-----------
 gpt4all-chat/src/modellist.cpp   |  2 +-
 gpt4all-chat/src/server.cpp      |  2 +-
 10 files changed, 31 insertions(+), 23 deletions(-)
 create mode 160000 gpt4all-chat/deps/rapidjson

diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml
index 1ca880307d72..79bcc21c841d 100644
--- a/.circleci/continue_config.yml
+++ b/.circleci/continue_config.yml
@@ -495,7 +495,7 @@ jobs:
             mkdir dotnet
             cd dotnet
             $dotnet_url="https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip"
-            wget "$dotnet_url"
+            wget.exe "$dotnet_url"
             Expand-Archive -LiteralPath .\dotnet-sdk-8.0.302-win-x64.zip
             $Env:DOTNET_ROOT="$($(Get-Location).Path)\dotnet-sdk-8.0.302-win-x64"
             $Env:PATH="$Env:DOTNET_ROOT;$Env:PATH"
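
Note: in PowerShell, bare wget resolves to the built-in alias for
Invoke-WebRequest, whose flags differ from GNU wget's; the explicit
.exe suffix bypasses the alias and runs the real wget binary (assuming
one is present on the CI image).
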
diff --git a/.gitmodules b/.gitmodules
index c177a0e12f19..0f46dd904c3b 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -20,3 +20,6 @@
 [submodule "gpt4all-chat/deps/Jinja2Cpp"]
 	path = gpt4all-chat/deps/Jinja2Cpp
 	url = https://github.com/nomic-ai/jinja2cpp.git
+[submodule "gpt4all-chat/deps/rapidjson"]
+	path = gpt4all-chat/deps/rapidjson
+	url = https://github.com/nomic-ai/rapidjson.git
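
Note: the new submodule points at nomic-ai's fork of rapidjson, a
dependency of Jinja2Cpp; vendoring it as a sibling submodule lets
gpt4all-chat/deps/CMakeLists.txt configure it directly (see below).
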
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 257338b9510c..09b4691dc546 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -33,6 +33,10 @@ option(GPT4ALL_SIGN_INSTALL "Sign installed binaries and installers (requires si
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_STANDARD 23)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
+if (MSVC)
+    # Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
+    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
+endif()
 
 
 # conftests
@@ -98,6 +102,10 @@ message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 
+if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+  set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
+endif()
+
 add_subdirectory(deps)
 add_subdirectory(../gpt4all-backend llmodel)
 
@@ -398,10 +406,6 @@ endif()
 
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
 
-if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-  set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
-endif()
-
 install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})
 
 install(
@@ -514,6 +518,7 @@ elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
     set(CPACK_BUNDLE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
 endif()
 
+set(CPACK_COMPONENTS_ALL gpt4all)  # exclude development components
 set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
 set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
 set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
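
Note on the CMakeLists.txt changes: MSVC reports the __cplusplus macro
as 199711L regardless of the selected standard unless /Zc:__cplusplus
is passed, which breaks libraries such as Jinja2Cpp that gate features
on that macro. The install-prefix default moves above
add_subdirectory(deps), presumably so the override is already in effect
when the newly added dependencies generate their install rules, and
CPACK_COMPONENTS_ALL restricts packaging to the gpt4all component so
development components installed by the deps are not shipped. A minimal
standalone illustration of the macro issue (not code from this repo):

    // With MSVC, "cl /std:c++20 demo.cpp" prints 199711 unless
    // /Zc:__cplusplus is also passed, in which case it prints 202002.
    #include <iostream>

    int main()
    {
        std::cout << __cplusplus << '\n';
        return 0;
    }
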
diff --git a/gpt4all-chat/deps/CMakeLists.txt b/gpt4all-chat/deps/CMakeLists.txt
index 495ff313ce6d..04b0e7616bbb 100644
--- a/gpt4all-chat/deps/CMakeLists.txt
+++ b/gpt4all-chat/deps/CMakeLists.txt
@@ -12,4 +12,11 @@ add_subdirectory(DuckX)
 set(QT_VERSION_MAJOR 6)
 add_subdirectory(QXlsx/QXlsx)
 
+# forked dependency of Jinja2Cpp
+set(RAPIDJSON_BUILD_DOC                  OFF)
+set(RAPIDJSON_BUILD_EXAMPLES             OFF)
+set(RAPIDJSON_BUILD_TESTS                OFF)
+set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
+add_subdirectory(rapidjson)
+
 add_subdirectory(Jinja2Cpp)
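
Note: the four RAPIDJSON_* options switch off rapidjson's documentation,
examples, tests, and instrumentation-related compile flags, leaving just
what Jinja2Cpp consumes; rapidjson is added before Jinja2Cpp so its
targets already exist when Jinja2Cpp is configured.
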
diff --git a/gpt4all-chat/deps/Jinja2Cpp b/gpt4all-chat/deps/Jinja2Cpp
index b2a716798bfa..bcf2f82ae120 160000
--- a/gpt4all-chat/deps/Jinja2Cpp
+++ b/gpt4all-chat/deps/Jinja2Cpp
@@ -1 +1 @@
-Subproject commit b2a716798bfa63c7dae303fc1e272964c4e1f9ee
+Subproject commit bcf2f82ae120f0a71c114ecb64a63ab5fe1ffc79
diff --git a/gpt4all-chat/deps/rapidjson b/gpt4all-chat/deps/rapidjson
new file mode 160000
index 000000000000..9b547ef4bd86
--- /dev/null
+++ b/gpt4all-chat/deps/rapidjson
@@ -0,0 +1 @@
+Subproject commit 9b547ef4bd86210ef084abc2790bd1ddfe66b592
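
Note: mode 160000 marks a gitlink; the tree records the pinned submodule
commit rather than any file contents.
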
diff --git a/gpt4all-chat/src/chat.cpp b/gpt4all-chat/src/chat.cpp
index c40bb96ed34b..d9d4bc1c5751 100644
--- a/gpt4all-chat/src/chat.cpp
+++ b/gpt4all-chat/src/chat.cpp
@@ -147,11 +147,10 @@ void Chat::newPromptResponsePair(const QString &prompt, const QList<QUrl> &attac
         promptPlusAttached = attachedContexts.join("\n\n") + "\n\n" + prompt;
 
     resetResponseState();
-    qsizetype prevMsgIndex = m_chatModel->count() - 1;
-    if (prevMsgIndex >= 0)
-        m_chatModel->updateCurrentResponse(prevMsgIndex, false);
+    if (int count = m_chatModel->count())
+        m_chatModel->updateCurrentResponse(count - 1, false);
     m_chatModel->appendPrompt(prompt, attachments);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();
 
     emit promptRequested(m_collections);
     m_needsSave = true;
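
Note: the caller no longer tracks the index that the new response will
occupy; appendResponse() now takes no arguments and derives the prompt
index itself (see the chatmodel.h hunks below). The if (int count = ...)
form also runs updateCurrentResponse() only when the model is non-empty.
The same call-site change appears in server.cpp at the end of this
patch.
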
diff --git a/gpt4all-chat/src/chatmodel.h b/gpt4all-chat/src/chatmodel.h
index 7ce6b0e884ad..324de5b619ab 100644
--- a/gpt4all-chat/src/chatmodel.h
+++ b/gpt4all-chat/src/chatmodel.h
@@ -352,7 +352,7 @@ class ChatModel : public QAbstractListModel
         emit countChanged();
     }
 
-    void appendResponse(int promptIndex)
+    void appendResponse()
     {
         qsizetype count;
         {
@@ -362,17 +362,13 @@ class ChatModel : public QAbstractListModel
             count = m_chatItems.count();
         }
 
+        int promptIndex = 0;
         beginInsertRows(QModelIndex(), count, count);
         {
             QMutexLocker locker(&m_mutex);
-            if (promptIndex >= 0) {
-                if (promptIndex >= m_chatItems.size())
-                    throw std::out_of_range(fmt::format("index {} is out of range", promptIndex));
-                auto &promptItem = m_chatItems[promptIndex];
-                if (promptItem.type() != ChatItem::Type::Prompt)
-                    throw std::invalid_argument(fmt::format("item at index {} is not a prompt", promptIndex));
-            }
             m_chatItems.emplace_back(ChatItem::response_tag, promptIndex);
+            if (auto pi = getPeerUnlocked(m_chatItems.size() - 1))
+                promptIndex = *pi;
         }
         endInsertRows();
         emit countChanged();
@@ -394,7 +390,6 @@ class ChatModel : public QAbstractListModel
         qsizetype endIndex  = startIndex + nNewItems;
         beginInsertRows(QModelIndex(), startIndex, endIndex - 1 /*inclusive*/);
         bool hadError;
-        int promptIndex;
         {
             QMutexLocker locker(&m_mutex);
             hadError = hasErrorUnlocked();
@@ -408,8 +403,6 @@ class ChatModel : public QAbstractListModel
         // Server can add messages when there is an error because each call is a new conversation
         if (hadError)
             emit hasErrorChanged(false);
-        if (promptIndex >= 0)
-            emit dataChanged(createIndex(promptIndex, 0), createIndex(promptIndex, 0), {PeerRole});
     }
 
     void truncate(qsizetype size)
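
Note on appendResponse(): the bounds and type checks on a
caller-supplied promptIndex are gone; the freshly appended response
instead asks getPeerUnlocked() for its peer prompt. In the batch-insert
path further down, the now-unused promptIndex local and its dataChanged
emission are dropped as well. A standalone sketch of the peer-lookup
idea, with simplified types and an assumed implementation (the real
getPeerUnlocked() may differ):

    #include <optional>
    #include <vector>

    enum class ItemType { Prompt, Response };

    // Assumed behavior: walk backward from a response to the nearest
    // preceding prompt; report "no peer" if none exists.
    std::optional<int> peerPromptOf(const std::vector<ItemType> &items, int index)
    {
        for (int i = index - 1; i >= 0; --i)
            if (items[i] == ItemType::Prompt)
                return i;
        return std::nullopt;
    }

    int main()
    {
        std::vector<ItemType> items { ItemType::Prompt, ItemType::Response };
        return peerPromptOf(items, 1).value_or(-1); // 0: the prompt at index 0
    }
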
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index 4798c69bdd8e..a11229a6ae35 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -318,7 +318,7 @@ void ModelInfo::setRepeatPenaltyTokens(int t)
 
 QVariant ModelInfo::defaultChatTemplate() const
 {
-    auto res = m_chatTemplate.or_else([this] -> std::optional<QString> {
+    auto res = m_chatTemplate.or_else([this]() -> std::optional<QString> {
         if (!installed || isOnline)
             return std::nullopt;
         if (!m_modelChatTemplate) {
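
Note: this fix matters because a lambda with a trailing return type
requires an explicit parameter list before C++23's P1102 relaxation,
and compiler support for the parenthesis-free form is uneven even in
C++23 mode. A minimal standalone example (not repo code):

    #include <optional>
    #include <string>

    int main()
    {
        // Portable: the empty parameter list is present.
        auto ok = []() -> std::optional<std::string> { return std::nullopt; };
        // Rejected by compilers without P1102 support:
        // auto bad = [] -> std::optional<std::string> { return std::nullopt; };
        return ok().has_value() ? 1 : 0;
    }
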
diff --git a/gpt4all-chat/src/server.cpp b/gpt4all-chat/src/server.cpp
index 2435d43b3c9e..4c48dcbdbf82 100644
--- a/gpt4all-chat/src/server.cpp
+++ b/gpt4all-chat/src/server.cpp
@@ -668,7 +668,7 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)
 
     // add prompt/response items to GUI
     m_chatModel->appendPrompt(request.prompt);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();
 
     // FIXME(jared): taking parameters from the UI inhibits reproducibility of results
     LLModel::PromptContext promptCtx {