Update protobuf version to 3.20.x
Signed-off-by: Kevin Chen <[email protected]>
kevinch-nv committed Aug 17, 2022
1 parent 04a033f commit ec056c8
Showing 9 changed files with 28 additions and 15 deletions.
2 changes: 1 addition & 1 deletion .gitmodules
@@ -1,7 +1,7 @@
 [submodule "third_party/protobuf"]
 path = third_party/protobuf
 url = https://github.com/protocolbuffers/protobuf.git
-branch = 3.8.x
+branch = 3.20.x
 [submodule "third_party/cub"]
 path = third_party/cub
 url = https://github.com/NVlabs/cub.git
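After the tracked branch changes, an existing checkout keeps the old protobuf commit until the submodule is refreshed. A minimal sketch of one way to pick up the update locally (this workflow is not part of the commit itself):

```bash
# Re-read .gitmodules so the local config sees the new 3.20.x branch
git submodule sync third_party/protobuf

# Check out the protobuf commit now pinned by the superproject
git submodule update --init --recursive third_party/protobuf

# Optional sanity check: the checkout should describe as a 3.20.x tag
git -C third_party/protobuf describe --tags
```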
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -86,7 +86,7 @@ endif()

 set(DEFAULT_CUDA_VERSION 11.3.1)
 set(DEFAULT_CUDNN_VERSION 8.2)
-set(DEFAULT_PROTOBUF_VERSION 3.0.0)
+set(DEFAULT_PROTOBUF_VERSION 3.20.1)

 # Dependency Version Resolution
 set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
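DEFAULT_PROTOBUF_VERSION is only a fallback; the set_ifndef pattern in this hunk suggests it applies when no value is passed at configure time. A hedged configure sketch, assuming the project resolves a PROTOBUF_VERSION cache variable the same way it resolves CUDA_VERSION (that set_ifndef call sits outside the lines shown here):

```bash
# Configure with the new default (DEFAULT_PROTOBUF_VERSION = 3.20.1)
cmake -S . -B build

# Assumed override; only valid if PROTOBUF_VERSION is resolved via set_ifndef
# the way CUDA_VERSION is in the hunk above
cmake -S . -B build -DPROTOBUF_VERSION=3.20.1
```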
10 changes: 5 additions & 5 deletions README.md
@@ -16,7 +16,7 @@ Need enterprise support? NVIDIA global support is available for TensorRT with the
 To build the TensorRT-OSS components, you will first need the following software packages.

 **TensorRT GA build**
-* [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download) v8.4.1.5
+* [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-download) v8.4.2.4

 **System Packages**
 * [CUDA](https://developer.nvidia.com/cuda-toolkit)
@@ -71,16 +71,16 @@ To build the TensorRT-OSS components, you will first need the following software

 ```bash
 cd ~/Downloads
-tar -xvzf TensorRT-8.4.1.5.Linux.x86_64-gnu.cuda-11.6.cudnn8.4.tar.gz
-export TRT_LIBPATH=`pwd`/TensorRT-8.4.1.5
+tar -xvzf TensorRT-8.4.2.4.Linux.x86_64-gnu.cuda-11.6.cudnn8.4.tar.gz
+export TRT_LIBPATH=`pwd`/TensorRT-8.4.2.4
 ```

 **Example: Windows on x86-64 with cuda-11.4**

 ```powershell
 cd ~\Downloads
-Expand-Archive .\TensorRT-8.4.1.5.Windows10.x86_64.cuda-11.6.cudnn8.4.zip
-$Env:TRT_LIBPATH = '$(Get-Location)\TensorRT-8.4.1.5'
+Expand-Archive .\TensorRT-8.4.2.4.Windows10.x86_64.cuda-11.6.cudnn8.4.zip
+$Env:TRT_LIBPATH = '$(Get-Location)\TensorRT-8.4.2.4'
 $Env:PATH += 'C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\'
 ```

2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-8.4.1.5
+8.4.2.4
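The same release string appears in README.md, VERSION and both Dockerfiles, so it is worth confirming nothing was missed. A quick, purely illustrative check for stale references to the old version:

```bash
# Any hit here would be a file the 8.4.2.4 bump did not reach
grep -rn '8\.4\.1\.5' README.md VERSION docker/ || echo "no stale 8.4.1.5 references"
```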
2 changes: 1 addition & 1 deletion docker/ubuntu-20.04.Dockerfile
@@ -21,7 +21,7 @@ ARG OS_VERSION=20.04
 FROM nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-ubuntu${OS_VERSION}
 LABEL maintainer="NVIDIA CORPORATION"

-ENV TRT_VERSION 8.4.1.5
+ENV TRT_VERSION 8.4.2.4
 SHELL ["/bin/bash", "-c"]

 # Setup user account
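TRT_VERSION is baked into the image via ENV rather than taken as a build argument, so picking up 8.4.2.4 means rebuilding the image. A rough rebuild sketch; treating CUDA_VERSION as a build arg is an assumption (only ARG OS_VERSION is visible in this hunk), and the image tag is made up for the example:

```bash
# Rebuild the Ubuntu 20.04 devel image after the TRT_VERSION bump.
# CUDA_VERSION must match an available nvidia/cuda cudnn8-devel tag.
docker build \
  --build-arg CUDA_VERSION=11.6.2 \
  --build-arg OS_VERSION=20.04 \
  -f docker/ubuntu-20.04.Dockerfile \
  -t tensorrt-ubuntu20.04-example .
```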
2 changes: 1 addition & 1 deletion docker/ubuntu-cross-aarch64.Dockerfile
@@ -21,7 +21,7 @@ ARG OS_VERSION=20.04
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${OS_VERSION}
 LABEL maintainer="NVIDIA CORPORATION"

-ENV TRT_VERSION 8.4.1.5
+ENV TRT_VERSION 8.4.2.4
 ENV DEBIAN_FRONTEND=noninteractive

 ARG uid=1000
13 changes: 11 additions & 2 deletions parsers/caffe/caffeParser/caffeParser.cpp
@@ -328,8 +328,12 @@ const IBlobNameToTensor* CaffeParser::parseBuffers(const uint8_t* deployBuffer,
 mModel = std::unique_ptr<trtcaffe::NetParameter>(new trtcaffe::NetParameter);
 google::protobuf::io::ArrayInputStream modelStream(modelBuffer, modelLength);
 google::protobuf::io::CodedInputStream codedModelStream(&modelStream);
+#if GOOGLE_PROTOBUF_VERSION >= 3011000
+codedModelStream.SetTotalBytesLimit(modelLength);
+#else
+// Note: This WARs the very low default size limit (64MB)
 codedModelStream.SetTotalBytesLimit(modelLength, -1);
-
+#endif
 if (!mModel->ParseFromCodedStream(&codedModelStream))
 {
 RETURN_AND_LOG_ERROR(nullptr, "Could not parse model file");
@@ -625,7 +629,12 @@ IBinaryProtoBlob* CaffeParser::parseBinaryProto(const char* fileName) noexcept

 IstreamInputStream rawInput(&stream);
 CodedInputStream codedInput(&rawInput);
-codedInput.SetTotalBytesLimit(INT_MAX, -1);
+#if GOOGLE_PROTOBUF_VERSION >= 3011000
+codedInput.SetTotalBytesLimit(INT_MAX);
+#else
+// Note: This WARs the very low default size limit (64MB)
+codedInput.SetTotalBytesLimit(INT_MAX, -1);
+#endif

 trtcaffe::BlobProto blob;
 bool ok = blob.ParseFromCodedStream(&codedInput);
8 changes: 6 additions & 2 deletions parsers/caffe/caffeParser/readProto.h
@@ -49,8 +49,12 @@ bool readBinaryProto(trtcaffe::NetParameter* net, const char* file, size_t bufSi

 IstreamInputStream rawInput(&stream);
 CodedInputStream codedInput(&rawInput);
-codedInput.SetTotalBytesLimit(int(bufSize), -1);
-
+#if GOOGLE_PROTOBUF_VERSION >= 3011000
+codedInput.SetTotalBytesLimit(int(bufSize));
+#else
+// Note: This WARs the very low default size limit (64MB)
+codedInput.SetTotalBytesLimit(int(bufSize), -1);
+#endif
 bool ok = net->ParseFromCodedStream(&codedInput);
 stream.close();

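All three call sites above gain the same guard: for protobuf 3.11 and newer (GOOGLE_PROTOBUF_VERSION >= 3011000) the single-argument SetTotalBytesLimit is used, while older releases keep the two-argument call that works around the low 64MB default limit. A simple, hedged way to audit for any two-argument calls left outside a guard, nothing more than a text search:

```bash
# Lists remaining two-argument SetTotalBytesLimit calls under parsers/;
# after this commit each one should sit in a GOOGLE_PROTOBUF_VERSION #else branch
grep -rn 'SetTotalBytesLimit(.*,' parsers/
```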
2 changes: 1 addition & 1 deletion third_party/protobuf
Submodule protobuf updated 1950 files
