
Commit 55d7098

Committed Jun 3, 2024
Revert "typescript bindings maintenance (#2363)"
As discussed on Discord, this PR was not ready to be merged. CI fails on it. This reverts commit a602f7f.

Signed-off-by: Jared Van Bortel <[email protected]>
1 parent a602f7f · commit 55d7098

30 files changed · +872 −1111 lines
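A revert commit like this is normally generated with git rather than written by hand. A minimal sketch of how a commit like this one is produced (the hash comes from the message above; the --signoff flag and target branch are assumptions):

    # create a new commit that undoes a602f7f; --signoff adds the Signed-off-by trailer
    git revert --signoff a602f7f
    git push origin main    # branch name assumed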
 

.circleci/continue_config.yml

+10 −24
@@ -570,27 +570,22 @@ jobs:
             wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
             sudo dpkg -i cuda-keyring_1.1-1_all.deb
             sudo apt-get update
-            sudo apt-get install -y cmake build-essential g++-12-aarch64-linux-gnu gcc-12-aarch64-linux-gnu vulkan-sdk cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5
+            sudo apt-get install -y cmake build-essential vulkan-sdk cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5
       - run:
           name: Build Libraries
           command: |
             export PATH=$PATH:/usr/local/cuda/bin
             cd gpt4all-backend
             mkdir -p runtimes/build
             cd runtimes/build
-            cmake ../.. -DCMAKE_BUILD_TYPE=Release
-            cmake --build . --parallel
+            cmake ../..
+            cmake --build . --parallel --config Release
             mkdir ../linux-x64
             cp -L *.so ../linux-x64 # otherwise persist_to_workspace seems to mess symlinks
-            cmake ../.. -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE="./toolchains/linux-arm64-toolchain.cmake"
-            cmake --build . --parallel
-            mkdir ../linux-arm64
-            cp -L *.so ../linux-arm64
       - persist_to_workspace:
           root: gpt4all-backend
           paths:
             - runtimes/linux-x64/*.so
-            - runtimes/linux-arm64/*.so

   build-bindings-backend-macos:
     macos:
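Outside CI, the restored linux-x64 build amounts to the following sequence (a sketch lifted from the commands above). Note that on single-config CMake generators such as Unix Makefiles, `--config Release` at build time has no effect, which is presumably why the reverted branch had moved Release selection to `-DCMAKE_BUILD_TYPE` at configure time:

    # sketch: restored linux-x64 backend build, mirroring the CI job above
    cd gpt4all-backend
    mkdir -p runtimes/build && cd runtimes/build
    cmake ../..
    cmake --build . --parallel --config Release   # Release only applies on multi-config generators
    mkdir ../linux-x64
    cp -L *.so ../linux-x64                       # dereference symlinks so persist_to_workspace keeps real files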
@@ -901,11 +896,6 @@ jobs:
       - checkout
       - attach_workspace:
           at: /tmp/gpt4all-backend
-      - run:
-          name: Install dependencies
-          command: |
-            sudo apt-get update
-            sudo apt-get install -y g++-12-aarch64-linux-gnu gcc-12-aarch64-linux-gnu
       - node/install:
           install-yarn: true
           node-version: "18.16"
@@ -918,24 +908,18 @@ jobs:
       - run:
           command: |
             cd gpt4all-bindings/typescript
-            yarn build:prebuilds
+            yarn prebuildify -t 18.16.0 --napi
       - run:
           command: |
             mkdir -p gpt4all-backend/prebuilds/linux-x64
             mkdir -p gpt4all-backend/runtimes/linux-x64
             cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so gpt4all-backend/runtimes/linux-x64
             cp gpt4all-bindings/typescript/prebuilds/linux-x64/*.node gpt4all-backend/prebuilds/linux-x64
-            mkdir -p gpt4all-backend/prebuilds/linux-arm64
-            mkdir -p gpt4all-backend/runtimes/linux-arm64
-            cp /tmp/gpt4all-backend/runtimes/linux-arm64/*-*.so gpt4all-backend/runtimes/linux-arm64
-            cp gpt4all-bindings/typescript/prebuilds/linux-arm64/*.node gpt4all-backend/prebuilds/linux-arm64
       - persist_to_workspace:
           root: gpt4all-backend
           paths:
             - prebuilds/linux-x64/*.node
             - runtimes/linux-x64/*-*.so
-            - prebuilds/linux-arm64/*.node
-            - runtimes/linux-arm64/*-*.so
   build-nodejs-macos:
     macos:
       xcode: "14.0.0"
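The restored `yarn prebuildify -t 18.16.0 --napi` step relies on prebuildify writing N-API addons into prebuilds/<platform>-<arch>/, which is what the copy and persist_to_workspace paths above expect. A local equivalent, as a sketch (run from gpt4all-bindings/typescript; dependency installation assumed beforehand):

    yarn install                          # assumed prerequisite so prebuildify is available
    yarn prebuildify -t 18.16.0 --napi    # emits prebuilds/linux-x64/*.node on an x86-64 Linux host
    ls prebuilds/linux-x64/               # these are what the CI copies into the shared workspace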
@@ -1045,11 +1029,13 @@ jobs:
             cp /tmp/gpt4all-backend/runtimes/darwin/*-*.* runtimes/darwin/native/

             cp /tmp/gpt4all-backend/prebuilds/darwin-x64/*.node prebuilds/darwin-x64/
+
+            # Fallback build if user is not on above prebuilds
+            mv -f binding.ci.gyp binding.gyp

-            # copy the backend source we depend on to make fallback builds work
-            mkdir backend
+            mkdir gpt4all-backend
             cd ../../gpt4all-backend
-            mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/backend/
+            mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/gpt4all-backend/

             # Test install
       - node/install-packages:
@@ -1059,7 +1045,7 @@ jobs:
       - run:
           command: |
             cd gpt4all-bindings/typescript
-            yarn run test:ci
+            yarn run test
       - run:
           command: |
             cd gpt4all-bindings/typescript

gpt4all-backend/CMakeLists.txt

−1
@@ -79,7 +79,6 @@ if (LLMODEL_ROCM)
 endif()

 set(CMAKE_VERBOSE_MAKEFILE ON)
-include(CheckCXXCompilerFlag)

 # Go through each build variant
 foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)

gpt4all-backend/toolchains/linux-arm64-toolchain.cmake

-11
This file was deleted.

gpt4all-bindings/typescript/.gitignore

−1
@@ -8,5 +8,4 @@ prebuilds/
 !.yarn/sdks
 !.yarn/versions
 runtimes/
-backend/
 compile_flags.txt
+1 −2
@@ -1,5 +1,4 @@
 test/
 spec/
-scripts/*
-!scripts/assert-backend-sources.js
+scripts/
 build

gpt4all-bindings/typescript/README.md

+10 −18
@@ -188,8 +188,6 @@ model.dispose();
 * python 3
 * On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home
 * macOS users do not need Vulkan, as GPT4All will use Metal instead.
-* CUDA Toolkit >= 11.4 (you can bypass this with adding a custom flag to build step)
-  - Windows: There is difficulty compiling with cuda if the Visual Studio IDE is NOT present.

 ### Build (from source)

@@ -198,29 +196,23 @@ git clone https://github.com/nomic-ai/gpt4all.git
 cd gpt4all-bindings/typescript
 ```

-llama.cpp git submodule for gpt4all can be possibly absent or outdated. Make sure to run
+* The below shell commands assume the current working directory is `typescript`.
+
+* To Build and Rebuild:

 ```sh
-git submodule update --init --recursive
+node scripts/prebuild.js
 ```
-
-The below shell commands assume the current working directory is `typescript`.
-
-Using yarn
+* llama.cpp git submodule for gpt4all can be possibly absent. If this is the case, make sure to run in llama.cpp parent directory

 ```sh
-yarn install
-yarn build
+git submodule update --init --recursive
 ```

-Using npm
-
 ```sh
-npm install
-npm run build
+yarn build:backend
 ```
-
-The `build:runtimes` script will create runtime libraries for your platform in `runtimes` and `build:prebuilds` will create the bindings in `prebuilds`. `build` is a shortcut for both.
+This will build platform-dependent dynamic libraries, and will be located in runtimes/(platform)/native

 ### Test

@@ -267,7 +259,7 @@ yarn test

 This package has been stabilizing over time development, and breaking changes may happen until the api stabilizes. Here's what's the todo list:

-* \[x] [Purely offline](#Offline-usage). Per the gui, which can be run completely offline, the bindings should be as well.
+* \[ ] Purely offline. Per the gui, which can be run completely offline, the bindings should be as well.
 * \[ ] NPM bundle size reduction via optionalDependencies strategy (need help)
   * Should include prebuilds to avoid painful node-gyp errors
 * \[x] createChatSession ( the python equivalent to create\_chat\_session )
@@ -284,7 +276,7 @@ This package has been stabilizing over time development, and breaking changes ma
 This repository serves as the new bindings for nodejs users.
 - If you were a user of [these bindings](https://github.com/nomic-ai/gpt4all-ts), they are outdated.
 - Version 4 includes the follow breaking changes
-  * `createEmbedding` & `EmbeddingModel.embed()` returns an object, `EmbeddingResult`, instead of a Float32Array.
+  * `createEmbedding` & `EmbeddingModel.embed()` returns an object, `EmbeddingResult`, instead of a float32array.
   * Removed deprecated types `ModelType` and `ModelFile`
   * Removed deprecated initiation of model by string path only


+62
@@ -0,0 +1,62 @@
+{
+    "targets": [
+        {
+            "target_name": "gpt4all", # gpt4all-ts will cause compile error
+            "include_dirs": [
+                "<!@(node -p \"require('node-addon-api').include\")",
+                "gpt4all-backend",
+            ],
+            "sources": [
+                # PREVIOUS VERSION: had to required the sources, but with newest changes do not need to
+                #"../../gpt4all-backend/llama.cpp/examples/common.cpp",
+                #"../../gpt4all-backend/llama.cpp/ggml.c",
+                #"../../gpt4all-backend/llama.cpp/llama.cpp",
+                # "../../gpt4all-backend/utils.cpp",
+                "gpt4all-backend/llmodel_c.cpp",
+                "gpt4all-backend/llmodel.cpp",
+                "prompt.cc",
+                "index.cc",
+            ],
+            "conditions": [
+                ['OS=="mac"', {
+                    'xcode_settings': {
+                        'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
+                    },
+                    'defines': [
+                        'LIB_FILE_EXT=".dylib"',
+                        'NAPI_CPP_EXCEPTIONS',
+                    ],
+                    'cflags_cc': [
+                        "-fexceptions"
+                    ]
+                }],
+                ['OS=="win"', {
+                    'defines': [
+                        'LIB_FILE_EXT=".dll"',
+                        'NAPI_CPP_EXCEPTIONS',
+                    ],
+                    "msvs_settings": {
+                        "VCCLCompilerTool": {
+                            "AdditionalOptions": [
+                                "/std:c++20",
+                                "/EHsc",
+                            ],
+                        },
+                    },
+                }],
+                ['OS=="linux"', {
+                    'defines': [
+                        'LIB_FILE_EXT=".so"',
+                        'NAPI_CPP_EXCEPTIONS',
+                    ],
+                    'cflags_cc!': [
+                        '-fno-rtti',
+                    ],
+                    'cflags_cc': [
+                        '-std=c++2a',
+                        '-fexceptions'
+                    ]
+                }]
+            ]
+        }]
+}
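This re-added file appears to be the CI variant of the gyp config: its include_dirs and sources point at a local gpt4all-backend directory rather than ../../gpt4all-backend, matching the CI steps above that copy the backend sources next to the bindings and then rename the file before the fallback build. A sketch of that sequence, taken from the CI commands earlier in this diff:

    # sketch: how CI stages the fallback build that consumes the file above
    cd gpt4all-bindings/typescript
    mv -f binding.ci.gyp binding.gyp    # swap in the CI gyp config
    mkdir gpt4all-backend
    cd ../../gpt4all-backend
    mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/gpt4all-backend/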

gpt4all-bindings/typescript/binding.gyp

+9 −5
@@ -1,15 +1,19 @@
 {
     "targets": [
         {
-            "target_name": "gpt4all",
+            "target_name": "gpt4all", # gpt4all-ts will cause compile error
             "include_dirs": [
                 "<!@(node -p \"require('node-addon-api').include\")",
-                "backend",
+                "../../gpt4all-backend",
             ],
             "sources": [
-                "backend/llmodel_c.cpp",
-                "backend/llmodel.cpp",
-                "backend/dlhandle.cpp",
+                # PREVIOUS VERSION: had to required the sources, but with newest changes do not need to
+                #"../../gpt4all-backend/llama.cpp/examples/common.cpp",
+                #"../../gpt4all-backend/llama.cpp/ggml.c",
+                #"../../gpt4all-backend/llama.cpp/llama.cpp",
+                # "../../gpt4all-backend/utils.cpp",
+                "../../gpt4all-backend/llmodel_c.cpp",
+                "../../gpt4all-backend/llmodel.cpp",
                 "prompt.cc",
                 "index.cc",
             ],
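With the restored binding.gyp, a local fallback compile goes through node-gyp against the in-repo backend sources. A minimal sketch (assumptions: node-gyp invoked via npx, dependencies installed, and the llama.cpp submodule checked out):

    cd gpt4all-bindings/typescript
    yarn install            # provides node-addon-api referenced by the gyp file
    npx node-gyp rebuild    # compiles index.cc / prompt.cc together with ../../gpt4all-backend/llmodel*.cpp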
